Python botocore module: botocore.exceptions usage examples

The following code examples, extracted from open-source Python projects, illustrate how to use the botocore.exceptions module.
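The basic pattern shared by most of the snippets below is to call an AWS client inside try/except and branch on the error code carried by botocore.exceptions.ClientError. A minimal sketch of that pattern (the bucket name is a placeholder, not taken from any of the projects below):

import boto3
import botocore.exceptions

s3 = boto3.client('s3')
try:
    s3.head_bucket(Bucket='example-bucket')  # placeholder bucket name
except botocore.exceptions.ClientError as err:
    # err.response carries the service error code and message
    code = err.response['Error']['Code']
    if code in ('404', 'NoSuchBucket'):
        print('bucket does not exist')
    else:
        raise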

Project: awsmfa    Author: dcoker
def make_session(identity_profile):
    session = botocore.session.Session(profile=identity_profile)
    try:
        session3 = boto3.session.Session(botocore_session=session)
    except botocore.exceptions.ProfileNotFound as err:
        print(str(err), file=sys.stderr)
        if session.available_profiles:
            print("Available profiles: %s" %
                  ", ".join(sorted(session.available_profiles)), file=sys.stderr)
            print("You can specify a profile by passing it with the -i "
                  "command line flag.", file=sys.stderr)
        else:
            print("You have no AWS profiles configured. Please run 'aws "
                  "configure --profile identity' to get started.", file=sys.stderr)
        return None, None, USER_RECOVERABLE_ERROR
    return session, session3, None
Project: AshsSDK    Author: thehappydinoa
def file_exists(self, remote_path):
        """
        Check if the file we are trying to upload already exists in S3

        :param remote_path:
        :return: True, if file exists. False, otherwise
        """

        try:
            # Find the object that matches this ETag
            self.s3.head_object(
                Bucket=self.bucket_name, Key=remote_path)
            return True
        except botocore.exceptions.ClientError:
            # Either File does not exist or we are unable to get
            # this information.
            return False
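A possible refinement of the check above (not part of the AshsSDK source): inspect the error code so that a missing key and an access problem are not conflated; the attribute names follow the snippet above.

    def file_exists(self, remote_path):
        try:
            self.s3.head_object(Bucket=self.bucket_name, Key=remote_path)
            return True
        except botocore.exceptions.ClientError as err:
            # head_object reports a missing key as a 404-style error
            if err.response['Error']['Code'] in ('404', 'NoSuchKey'):
                return False
            raise  # propagate AccessDenied and other unexpected failures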
Project: analytics-platform-ops    Author: ministryofjustice
def retry(fn, max_attempts=10, delay=0.200):
    attempts = 0
    while True:
        try:
            fn()
            break
        except botocore.exceptions.ClientError as error:
            # Only retry on boto's ClientError/NoSuchEntity error
            if error.response["Error"]["Code"] == "NoSuchEntity":
                LOG.warning(
                    "error while attaching role to policy: {}.".format(error))
                attempts += 1
                if attempts < max_attempts:
                    LOG.warning("Retry in {}s...".format(delay))
                    time.sleep(delay)
                else:
                    raise
            else:
                raise
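A hypothetical call site for the retry helper above; the client and ARNs are placeholders, chosen because attach_role_policy commonly races IAM's eventual consistency and raises NoSuchEntity right after a role is created.

import boto3

iam = boto3.client('iam')
retry(lambda: iam.attach_role_policy(
    RoleName='example-role',                                # placeholder
    PolicyArn='arn:aws:iam::123456789012:policy/example'))  # placeholder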
Project: chalice    Author: aws
def test_create_role_raises_error_on_failure(self, stubbed_session):
        arn = 'good_arn' * 3
        role_id = 'abcd' * 4
        today = datetime.datetime.today()
        stubbed_session.stub('iam').create_role(
            RoleName='role_name',
            AssumeRolePolicyDocument=json.dumps({'trust': 'policy'})
        ).returns({'Role': {
            'RoleName': 'No', 'Arn': arn, 'Path': '/',
            'RoleId': role_id, 'CreateDate': today}}
        )
        stubbed_session.stub('iam').put_role_policy(
            RoleName='role_name',
            PolicyName='role_name',
            PolicyDocument={'policy': 'document'}
        ).raises_error(
            error_code='MalformedPolicyDocumentException',
            message='MalformedPolicyDocument'
        )
        stubbed_session.activate_stubs()
        awsclient = TypedAWSClient(stubbed_session)
        with pytest.raises(botocore.exceptions.ClientError):
            awsclient.create_role(
                'role_name', {'trust': 'policy'}, {'policy': 'document'})
        stubbed_session.verify_stubs()
Project: chalice    Author: aws
def test_create_function_fails_after_max_retries(self, stubbed_session):
        kwargs = {
            'FunctionName': 'name',
            'Runtime': 'python2.7',
            'Code': {'ZipFile': b'foo'},
            'Handler': 'app.app',
            'Role': 'myarn',
        }
        for _ in range(TypedAWSClient.LAMBDA_CREATE_ATTEMPTS):
            stubbed_session.stub('lambda').create_function(
                **kwargs).raises_error(
                error_code='InvalidParameterValueException',
                message=('The role defined for the function cannot '
                         'be assumed by Lambda.')
                )

        stubbed_session.activate_stubs()
        awsclient = TypedAWSClient(stubbed_session, mock.Mock(spec=time.sleep))
        with pytest.raises(LambdaClientError) as excinfo:
            awsclient.create_function('name', 'myarn', b'foo', 'python2.7',
                                      'app.app')
        assert isinstance(
            excinfo.value.original_error, botocore.exceptions.ClientError)
        stubbed_session.verify_stubs()
Project: chalice    Author: aws
def test_create_function_propagates_unknown_error(self, stubbed_session):
        kwargs = {
            'FunctionName': 'name',
            'Runtime': 'python2.7',
            'Code': {'ZipFile': b'foo'},
            'Handler': 'app.app',
            'Role': 'myarn',
        }
        stubbed_session.stub('lambda').create_function(
            **kwargs).raises_error(
            error_code='UnknownException', message='')
        stubbed_session.activate_stubs()
        awsclient = TypedAWSClient(stubbed_session, mock.Mock(spec=time.sleep))
        with pytest.raises(LambdaClientError) as excinfo:
            awsclient.create_function('name', 'myarn', b'foo', 'python2.7',
                                      'app.app')
        assert isinstance(
            excinfo.value.original_error, botocore.exceptions.ClientError)
        stubbed_session.verify_stubs()
Project: chalice    Author: aws
def test_update_function_fails_after_max_retries(self, stubbed_session):
        stubbed_session.stub('lambda').update_function_code(
            FunctionName='name', ZipFile=b'foo').returns(
                {'FunctionArn': 'arn'})

        update_config_kwargs = {
            'FunctionName': 'name',
            'Role': 'role-arn'
        }
        for _ in range(TypedAWSClient.LAMBDA_CREATE_ATTEMPTS):
            stubbed_session.stub('lambda').update_function_configuration(
                **update_config_kwargs).raises_error(
                    error_code='InvalidParameterValueException',
                    message=('The role defined for the function cannot '
                             'be assumed by Lambda.'))
        stubbed_session.activate_stubs()
        awsclient = TypedAWSClient(stubbed_session, mock.Mock(spec=time.sleep))

        with pytest.raises(botocore.exceptions.ClientError):
            awsclient.update_function('name', b'foo', role_arn='role-arn')
        stubbed_session.verify_stubs()
Project: awscfncli    Author: Kotaimen
def boto3_exception_handler(f):
    """Capture and pretty print exceptions"""

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except (botocore.exceptions.ClientError,
                botocore.exceptions.WaiterError,
                botocore.exceptions.ParamValidationError,
                ConfigError) as e:
            click.secho(str(e), fg='red')
        except KeyboardInterrupt as e:
            click.secho('Aborted.', fg='red')

    return wrapper
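A hypothetical example of applying the decorator above to a click command (command name and body are illustrative, not from awscfncli):

@click.command()
@boto3_exception_handler
def describe():
    # any ClientError/WaiterError/ParamValidationError raised here is printed
    # in red by the wrapper instead of surfacing as a traceback
    client = boto3.client('cloudformation')
    click.echo(client.describe_stacks(StackName='example-stack'))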
Project: aCloudGuru-DynamoDB    Author: acantril
def c_table (c): # create dynamo DB tables
    try:
        print "INFO :: Creating %s Table....." % c['TableName']
        db_r.create_table(**c)
        print "INFO :: Waiting for completion..."
        db_r.Table(c['TableName']).wait_until_exists()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "ResourceInUseException":
            print "INFO :: WeatherstationInc %s Table exists, deleting ...." % c['TableName']
            db_r.Table(c['TableName']).delete()
            print "INFO :: Waiting for delete.."
            db_r.Table(c['TableName']).wait_until_not_exists()
            c_table (c)
        else:
            print "Unknown Error"
#------------------------------------------------------------------------------
Project: aCloudGuru-DynamoDB    Author: acantril
def c_table (Table, t_config): # create dynamo DB tables
    """
    Try to create the table; if the call errors because the table already exists,
    drop it, then rerun this function to create it again.
    """
    try:
        print "INFO :: Creating %s Table....." % Table
        db_r.create_table(
            AttributeDefinitions = t_config[Table]['AttributeDefinitions'],
            TableName=Table,
            KeySchema = t_config[Table]['KeySchema'],
            ProvisionedThroughput=t_config[Table]['ProvisionedThroughput']
        )
        print "INFO :: Waiting for completion..."
        db_r.Table(Table).wait_until_exists()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "ResourceInUseException":
            print "INFO :: Learning Online %s Table exists, deleting ...." % Table
            db_r.Table(Table).delete()
            print "INFO :: Waiting for delete.."
            db_r.Table(Table).wait_until_not_exists()
            c_table (Table, t_config)
        else:
            print "Unknown Error"
#------------------------------------------------------------------------------
Project: dependencies-resolver    Author: onfido
def get_object_md5_checksum(bucket, key):
    """This function returns the MD5 checksum for the remote file.
    If the file was uploaded as a single-part file, the MD5 checksum will be
    the checksum of the file content.
    However, if the file was uploaded as a multi-part file,
    AWS calculates the MD5 in the following way (based on AWS documentation):
        1. Calculate the MD5 hash for each uploaded part of the file.
        2. Concatenate the hashes into a single binary string.
        3. Calculate the MD5 hash of that result.
        4. Concatenate the resulting MD5 hash with a dash
           and the number of file parts.

    :param bucket: The name of the bucket.
    :param key: The full path to the remote file.
    :return: The MD5 checksum for the remote file.
    """
    try:
        md5_checksum = s3_client.head_object(
            Bucket=bucket,
            Key=key
        )['ETag'][1:-1]
    except botocore.exceptions.ClientError:
        md5_checksum = ''
    return md5_checksum
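A sketch (not part of the onfido project) of reproducing the multi-part ETag locally from the algorithm described in the docstring above, assuming the part size used at upload time is known:

import hashlib

def local_multipart_etag(path, part_size=8 * 1024 * 1024):
    # part_size must match the size used for the original upload;
    # 8 MiB here is only an assumption.
    part_hashes = []
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(part_size)
            if not chunk:
                break
            part_hashes.append(hashlib.md5(chunk).digest())
    if len(part_hashes) == 1:
        return part_hashes[0].hex()  # single-part upload: plain MD5 of the content
    combined = hashlib.md5(b''.join(part_hashes))
    return '{}-{}'.format(combined.hexdigest(), len(part_hashes))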
Project: aws-pcf-quickstart    Author: cf-platform-eng
def delete_bucket(bucket_name: str, region: str, key: str, secret: str):
    print("Deleting bucket {}".format(bucket_name))
    s3_client = boto3.client(
        service_name='s3',
        region_name=region,
        aws_access_key_id=key,
        aws_secret_access_key=secret
    )
    try:
        contents = s3_client.list_objects(Bucket=bucket_name).get('Contents')
        while contents is not None:
            delete_keys = [{'Key': o.get('Key')} for o in contents]
            s3_client.delete_objects(Bucket=bucket_name, Delete={
                'Objects': delete_keys
            })
            contents = s3_client.list_objects(Bucket=bucket_name).get('Contents')
        s3_client.delete_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
        error = e.response.get('Error')
        if not error or error.get('Code') != 'NoSuchBucket':
            raise e
Project: spark-notebook    Author: mas-dse
def get_subnets(self):
        try:
            client = boto3.client('ec2',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EC2: %s" % e)

        # Search EC2 for the VPC subnets
        try:
            return client.describe_subnets()
        except botocore.exceptions.ClientError as e:
            raise AWSException("There was an error describing the VPC Subnets: %s" %
                               e.response["Error"]["Message"])
        except botocore.exceptions.ParamValidationError as e:
            raise AWSException("There was an error describing the VPC Subnets: %s" % e)
Project: spark-notebook    Author: mas-dse
def list_clusters(self):
        try:
            client = boto3.client('emr',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EMR: %s" % e)

        try:
            cluster_list = client.list_clusters()
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise AWSException("Invalid AWS access key id or aws secret access key")
            else:
                raise AWSException("There was an error creating a new EMR cluster: %s" %
                                   e.response["Error"]["Message"])
        except Exception as e:
            raise AWSException("Unknown Error: %s" % e)

        return cluster_list
Project: spark-notebook    Author: mas-dse
def describe_cluster(self, cluster_id):
        try:
            client = boto3.client('emr',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EMR: %s" % e)

        try:
            return client.describe_cluster(ClusterId=cluster_id)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise AWSException("Invalid AWS access key id or aws secret access key")
            else:
                raise AWSException("There was an error describing the EMR cluster: %s" %
                                   e.response["Error"]["Message"])
        except Exception as e:
            raise AWSException("Unknown Error: %s" % e)
Project: spark-notebook    Author: mas-dse
def list_bootstrap_actions(self, cluster_id):
        try:
            client = boto3.client('emr',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EMR: %s" % e)

        try:
            return client.list_bootstrap_actions(ClusterId=cluster_id)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise AWSException("Invalid AWS access key id or aws secret access key")
            else:
                raise AWSException("There was an error describing the EMR cluster: %s" %
                                   e.response["Error"]["Message"])
        except Exception as e:
            raise AWSException("Unknown Error: %s" % e)
Project: spark-notebook    Author: mas-dse
def terminate_cluster(self, cluster_id):
        try:
            client = boto3.client('emr',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EMR: %s" % e)

        try:
            client.terminate_job_flows(JobFlowIds=[cluster_id])
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise AWSException("Invalid AWS access key id or aws secret access key")
            else:
                raise AWSException("There was an error terminating the EMR cluster: %s" %
                                   e.response["Error"]["Message"])
        except Exception as e:
            raise AWSException("Unknown Error: %s" % e)
Project: spark-notebook    Author: mas-dse
def get_security_group_port_open(self, security_group_id, port):
        try:
            client = boto3.client('ec2',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EC2: %s" % e)

        try:
            response = client.describe_security_groups(GroupIds=[security_group_id])

            # Loop through all of the security group permissions and check
            # whether the given port is open
            for ip_permission in response["SecurityGroups"][0]["IpPermissions"]:
                if ip_permission["FromPort"] == port and ip_permission["ToPort"] == port:
                    return True
            return False
        except botocore.exceptions.ClientError as e:
            raise AWSException("There was an error describing the security group: %s" %
                               e.response["Error"]["Message"])
Project: awsmfa    Author: dcoker
def one_mfa(args, credentials):
    session, session3, err = make_session(args.identity_profile)
    if err:
        return err

    if "AWSMFA_TESTING_MODE" in os.environ:
        use_testing_credentials(args, credentials)
        return OK

    mfa_args = {}
    if args.token_code != 'skip':
        serial_number, token_code, err = acquire_code(args, session, session3)
        if err is not OK:
            return err
        mfa_args['SerialNumber'] = serial_number
        mfa_args['TokenCode'] = token_code

    sts = session3.client('sts')
    try:
        if args.role_to_assume:
            mfa_args.update(
                DurationSeconds=min(args.duration, 3600),
                RoleArn=args.role_to_assume,
                RoleSessionName=args.role_session_name)
            response = sts.assume_role(**mfa_args)
        else:
            mfa_args.update(DurationSeconds=args.duration)
            response = sts.get_session_token(**mfa_args)
    except botocore.exceptions.ClientError as err:
        if err.response["Error"]["Code"] == "AccessDenied":
            print(str(err), file=sys.stderr)
            return USER_RECOVERABLE_ERROR
        else:
            raise
    print_expiration_time(response['Credentials']['Expiration'])
    update_credentials_file(args.aws_credentials,
                            args.target_profile,
                            args.identity_profile,
                            credentials,
                            response['Credentials'])
    return OK
Project: clouds-aws    Author: elias5000
def wait(stack, show_events=False, last_event=None):
    """wait for stack action to complete"""
    global REGION

    stack_obj = boto3.resource('cloudformation', region_name=REGION).Stack(stack)
    while True:
        try:
            stack_obj.reload()

            # display new events
            if show_events:
                last_event = stack_events(stack, last_event=last_event)

            # exit condition
            if stack_obj.stack_status[-8:] == 'COMPLETE':
                break
            if stack_obj.stack_status == 'DELETE_FAILED':
                break

        except botocore.exceptions.ClientError:
            break

        # limit requests to API
        sleep(5)
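An alternative worth noting (not from the clouds-aws source): botocore ships CloudFormation waiters that do this polling with back-off for you; the stack name and waiter choice below are illustrative.

import boto3
import botocore.exceptions

cfn = boto3.client('cloudformation')
waiter = cfn.get_waiter('stack_update_complete')  # or 'stack_create_complete' / 'stack_delete_complete'
try:
    waiter.wait(StackName='example-stack')        # placeholder stack name
except botocore.exceptions.WaiterError as err:
    print('stack did not reach a COMPLETE state: %s' % err)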


# command functions
Project: clouds-aws    Author: elias5000
def validate(args):
    """validate local stack(s)"""
    stacks = args.stack

    # validate all stacks
    if args.all:
        stacks = local_stacks()

    # filter for existing stacks
    elif stacks:
        stacks = [stack for stack in stacks if stack in local_stacks()]

    # bail if no stack to validate
    if not stacks:
        LOG.warning(
            'this command needs a list of local stacks, or the --all flag to validate all stacks')
        sys.exit(1)

    # action
    cfn = get_cfn()
    retval = 0
    for stack in stacks:
        tpl_body = load_template(stack, True)
        try:
            cfn.validate_template(TemplateBody=tpl_body)
            res = 'ok'
        except botocore.exceptions.ClientError as err:
            res = 'not ok: %s' % str(err)
            retval = 1
        print('%s:%s %s' % (stack, ''.rjust(max([len(s) for s in stacks]) - len(stack)), res))

    sys.exit(retval)
Project: aws-cfn-plex    Author: lordmuffin
def check_for_200_error(response, **kwargs):
    # From: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
    # There are two opportunities for a copy request to return an error. One
    # can occur when Amazon S3 receives the copy request and the other can
    # occur while Amazon S3 is copying the files. If the error occurs before
    # the copy operation starts, you receive a standard Amazon S3 error. If the
    # error occurs during the copy operation, the error response is embedded in
    # the 200 OK response. This means that a 200 OK response can contain either
    # a success or an error. Make sure to design your application to parse the
    # contents of the response and handle it appropriately.
    #
    # So this handler checks for this case.  Even though the server sends a
    # 200 response, conceptually this should be handled exactly like a
    # 500 response (with respect to raising exceptions, retries, etc.)
    # We're connected *before* all the other retry logic handlers, so as long
    # as we switch the error code to 500, we'll retry the error as expected.
    if response is None:
        # A None response can happen if an exception is raised while
        # trying to retrieve the response.  See Endpoint._get_response().
        return
    http_response, parsed = response
    if _looks_like_special_case_error(http_response):
        logger.debug("Error found for response with 200 status code, "
                     "errors: %s, changing status code to "
                     "500.", parsed)
        http_response.status_code = 500
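_looks_like_special_case_error is referenced above but not included in this excerpt; a rough sketch of the check it performs, based only on the behaviour the comments describe (the actual botocore implementation may differ):

import xml.etree.ElementTree as ET

def _looks_like_special_case_error(http_response):
    # A 200 response whose body is an S3 <Error> XML document is really an error.
    if http_response.status_code != 200:
        return False
    try:
        root = ET.fromstring(http_response.content)
    except ET.ParseError:
        return False
    return root.tag == 'Error'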
Project: DevOps    Author: YoLoveLife
def _botocore_exception_maybe():
    """
    Allow for boto3 not being installed when using these utils by wrapping
    botocore.exceptions instead of assigning from it directly.
    """
    if HAS_BOTO3:
        return botocore.exceptions.ClientError
    return type(None)
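A hypothetical illustration of how such a wrapper is typically used: HAS_BOTO3 is set at import time, and the wrapper appears in except clauses that can only execute when boto3 is actually installed (the function and message below are made up).

try:
    import botocore.exceptions
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False

def describe_instances(client, **params):
    try:
        return client.describe_instances(**params)
    except _botocore_exception_maybe() as err:
        # this handler is only reachable when boto3/botocore are installed,
        # since the client call above could not have been made otherwise
        raise RuntimeError('EC2 call failed: %s' % err)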
Project: analytics-platform-ops    Author: ministryofjustice
def detach_bucket_policies(event, context):
    """
    Detaches the team bucket IAM policies from the user's IAM role

    event = {
        "user": {"username": "alice"},
        "team": {"slug": "justice-league"}
    }
    """

    username = event["user"]["username"]
    team_slug = event["team"]["slug"]

    client = boto3.client("iam")
    errors = []
    for policy_type in [POLICY_READ_WRITE, POLICY_READ_ONLY]:
        # Be sure we detach all policies without stopping early
        try:
            client.detach_role_policy(
                RoleName=naming.role_name(username),
                PolicyArn=policy_arn(team_slug, policy_type),
            )
        except botocore.exceptions.ClientError as error:
            # Ignoring this error raised when detaching a policy not attached
            if error.response["Error"]["Code"] != "NoSuchEntity":
                errors.append(error)
        except Exception as error:
            # Other exceptions are saved and raised after the loop
            errors.append(error)

    if errors:
        message = "One or more errors occurred while detaching policies from role: {}".format(
            errors)
        LOG.error(message)
        raise Exception(message)
Project: chalice    Author: aws
def test_lambda_function_bad_error_propagates(self, stubbed_session):
        stubbed_session.stub('lambda').get_function(FunctionName='myappname')\
                .raises_error(error_code='UnexpectedError',
                              message='Unknown')

        stubbed_session.activate_stubs()

        awsclient = TypedAWSClient(stubbed_session)
        with pytest.raises(botocore.exceptions.ClientError):
            awsclient.lambda_function_exists(name='myappname')

        stubbed_session.verify_stubs()
Project: chalice    Author: aws
def test_unexpected_error_is_propagated(self, stubbed_session):
        stubbed_session.stub('iam').get_role(RoleName='Yes').raises_error(
            error_code='InternalError',
            message='Foo')
        stubbed_session.activate_stubs()
        awsclient = TypedAWSClient(stubbed_session)
        with pytest.raises(botocore.exceptions.ClientError):
            awsclient.get_role_arn_for_name(name='Yes')
        stubbed_session.verify_stubs()
Project: Zappa    Author: Miserlou
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
        """
        Copies src file to destination within a bucket.
        """
        try:
            self.s3_client.head_bucket(Bucket=bucket_name)
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # If a client error is thrown, then check that it was a 404 error.
            # If it was a 404 error, then the bucket does not exist.
            error_code = int(e.response['Error']['Code'])
            if error_code == 404:
                return False

        copy_src = {
            "Bucket": bucket_name,
            "Key": src_file_name
        }
        try:
            self.s3_client.copy(
                CopySource=copy_src,
                Bucket=bucket_name,
                Key=dst_file_name
            )
            return True
        except botocore.exceptions.ClientError:  # pragma: no cover
            return False
Project: Zappa    Author: Miserlou
def remove_from_s3(self, file_name, bucket_name):
        """
        Given a file name and a bucket, remove it from S3.

        There's no reason to keep the file hosted on S3 once it's been made into a Lambda function, so we can delete it from S3.

        Returns True on success, False on failure.

        """
        try:
            self.s3_client.head_bucket(Bucket=bucket_name)
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            # If a client error is thrown, then check that it was a 404 error.
            # If it was a 404 error, then the bucket does not exist.
            error_code = int(e.response['Error']['Code'])
            if error_code == 404:
                return False

        try:
            self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
            return True
        except botocore.exceptions.ClientError:  # pragma: no cover
            return False

    ##
    # Lambda
    ##
Project: Zappa    Author: Miserlou
def delete_rule(self, rule_name):
        """
        Delete a CWE rule.

        This deletes them, but they will still show up in the AWS console.
        Annoying.

        """
        logger.debug('Deleting existing rule {}'.format(rule_name))

        # All targets must be removed before
        # we can actually delete the rule.
        try:
            targets = self.events_client.list_targets_by_rule(Rule=rule_name)
        except botocore.exceptions.ClientError as e:
            # This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
            error_code = e.response['Error']['Code']
            if error_code == 'AccessDeniedException':
                raise
            else:
                logger.debug('No target found for this rule: {} {}'.format(rule_name, e.args[0]))
                return

        if 'Targets' in targets and targets['Targets']:
            self.events_client.remove_targets(Rule=rule_name, Ids=[x['Id'] for x in targets['Targets']])
        else:  # pragma: no cover
            logger.debug('No target to delete')

        # Delete our rule.
        self.events_client.delete_rule(Name=rule_name)
Project: Zappa    Author: Miserlou
def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
        """
        Create the DynamoDB table for async task return values
        """
        try:
            dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
            return False, dynamodb_table

        # catch this exception (triggered if the table doesn't exist)
        except botocore.exceptions.ClientError:
            dynamodb_table = self.dynamodb_client.create_table(
                AttributeDefinitions=[
                    {
                        'AttributeName': 'id',
                        'AttributeType': 'S'
                    }
                ],
                TableName=table_name,
                KeySchema=[
                    {
                        'AttributeName': 'id',
                        'KeyType': 'HASH'
                    },
                ],
                ProvisionedThroughput = {
                    'ReadCapacityUnits': read_capacity,
                    'WriteCapacityUnits': write_capacity
                }
            )
            if dynamodb_table:
                try:
                    self._set_async_dynamodb_table_ttl(table_name)
                except botocore.exceptions.ClientError:
                    # this fails because the operation is async, so retry
                    time.sleep(10)
                    self._set_async_dynamodb_table_ttl(table_name)

        return True, dynamodb_table
Project: aCloudGuru-DynamoDB    Author: acantril
def c_table(TableName, **kwargs): # handles the creation of a table with error checking
                                # kwargs optionally passes in boto3 sessions for multithreading
    try:
        db_r.create_table(**t_conf(TableName))
        print "INFO :: Waiting for Table [%s] to complete..." % TableName
        db_r.Table(TableName).wait_until_exists()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "ResourceInUseException":
            db_r.Table(TableName).delete()
            print "INFO :: Learning Online %s Table exists, waiting for delete ...." % TableName
            db_r.Table(TableName).wait_until_not_exists()
            c_table(TableName)
        else:
            raise
#------------------------------------------------------------------------------
Project: spark-notebook    Author: mas-dse
def test_credentials(self):
        try:
            boto3.client('sts',
                         aws_access_key_id=self.access_key_id,
                         aws_secret_access_key=self.secret_access_key,
                         region_name=self.region_name).get_caller_identity()['Arn']
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure" or \
                    e.response["Error"]["Code"] == "InvalidClientTokenId":
                raise AWSException("Invalid AWS access key id or aws secret access key")
        except Exception as e:
            raise AWSException(str(e))
Project: spark-notebook    Author: mas-dse
def test_ssh_key(self, key_name, identity_file):
        client = None

        self.key_name = key_name
        self.identity_file = identity_file

        try:
            client = boto3.client('ec2',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise AWSException("Invalid AWS access key id or aws secret access key")
        except Exception as e:
            raise AWSException("There was an error connecting to EC2: %s" % e)

        # Search EC2 for the key-name
        try:
            client.describe_key_pairs(KeyNames=[self.key_name])
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise AWSException("Invalid AWS access key id or aws secret access key")
            elif e.response["Error"]["Code"] == "InvalidKeyPair.NotFound":
                raise AWSException("Key %s not found on AWS" % self.key_name)
            else:
                raise AWSException("There was an error describing the SSH key pairs: %s" %
                                   e.response["Error"]["Message"])

        # Verify the identity file exists
        if not os.path.isfile(self.identity_file):
            raise AWSException("Key identity file %s not found" % self.identity_file)
Project: spark-notebook    Author: mas-dse
def create_ssh_key(self, email_address, file_path):
        try:
            client = boto3.client('ec2',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EC2: %s" % e)

        self.key_name = "%s_%s_%s" % (str(email_address.split("@")[0]),
                                      str(socket.gethostname()),
                                      str(int(time.time())))

        self.identity_file = file_path + "/" + self.key_name + ".pem"

        # Create an EC2 key pair
        try:
            key = client.create_key_pair(KeyName=self.key_name)
            with open(self.identity_file, 'a') as out:
                out.write(key['KeyMaterial'] + '\n')
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "AuthFailure":
                raise AWSException("Invalid AWS access key id or aws secret access key")
            else:
                raise AWSException("There was an error creating a new SSH key pair: %s" %
                                   e.response["Error"]["Message"])
        except Exception as e:
            raise AWSException("Unknown Error: %s" % e)

        # Verify the key pair was saved locally
        if not os.path.isfile(self.identity_file):
            raise AWSException("SSH key %s not saved" % self.identity_file)
Project: spark-notebook    Author: mas-dse
def get_account_id(self):
        try:
            client = boto3.client('sts',
                                  aws_access_key_id=self.access_key_id,
                                  aws_secret_access_key=self.secret_access_key,
                                  region_name=self.region_name)
        except Exception as e:
            raise AWSException("There was an error connecting to EC2: %s" % e)

        try:
            return client.get_caller_identity()["Account"]
        except botocore.exceptions.ClientError as e:
            raise AWSException("There was an error getting the Account ID: %s" %
                               e.response["Error"]["Message"])
Project: clouds-aws    Author: elias5000
def update(args):
    """update or create a stack in AWS."""
    stack = args.stack

    if stack not in local_stacks():
        LOG.error('no such stack: ' + stack)
        return

    if stack not in remote_stacks().keys() and not args.create_missing:
        LOG.warning(
            'stack ' + stack + ' does not exist in AWS, add --create_missing to create a new stack')
        return

    # read template and parameters
    tpl_body = load_template(stack, True)
    params = load_parameters(stack)

    # action
    cfn = get_cfn()
    last_event = None

    try:
        if stack in remote_stacks().keys():
            LOG.info('updating stack %s', stack)
            last_event = fetch_all_stack_events(stack)[-1]['Timestamp']
            stack_id = cfn.update_stack(
                StackName=stack,
                TemplateBody=tpl_body,
                Parameters=params,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']
            )['StackId']
            LOG.info('updated stack with physical id %s', stack_id)
        else:
            LOG.info('creating stack %s', stack)
            stack_id = cfn.create_stack(
                StackName=stack,
                TemplateBody=tpl_body,
                Parameters=params,
                Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']
            )['StackId']
            LOG.info('created stack with physical id %s', stack_id)
    except botocore.exceptions.ClientError as err:
        LOG.warning(str(err))
        return
    except botocore.exceptions.ParamValidationError as err:
        LOG.warning(str(err))
        return

    # synchronous mode
    if args.wait or args.events:
        wait(stack, show_events=args.events, last_event=last_event)
Project: aws-cfn-plex    Author: lordmuffin
def get_auth_instance(self, signing_name, region_name,
                          signature_version=None, **kwargs):
        """
        Get an auth instance which can be used to sign a request
        using the given signature version.

        :type signing_name: string
        :param signing_name: Service signing name. This is usually the
                             same as the service name, but can differ. E.g.
                             ``emr`` vs. ``elasticmapreduce``.

        :type region_name: string
        :param region_name: Name of the service region, e.g. ``us-east-1``

        :type signature_version: string
        :param signature_version: Signature name like ``v4``.

        :rtype: :py:class:`~botocore.auth.BaseSigner`
        :return: Auth instance to sign a request.
        """
        if signature_version is None:
            signature_version = self._signature_version

        cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
        if cls is None:
            raise UnknownSignatureVersionError(
                signature_version=signature_version)
        # If there's no credentials provided (i.e credentials is None),
        # then we'll pass a value of "None" over to the auth classes,
        # which already handle the cases where no credentials have
        # been provided.
        frozen_credentials = None
        if self._credentials is not None:
            frozen_credentials = self._credentials.get_frozen_credentials()
        kwargs['credentials'] = frozen_credentials
        if cls.REQUIRES_REGION:
            if self._region_name is None:
                raise botocore.exceptions.NoRegionError()
            kwargs['region_name'] = region_name
            kwargs['service_name'] = signing_name
        auth = cls(**kwargs)
        return auth

    # Alias get_auth for backwards compatibility.
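For comparison, a self-contained sketch of using one of these auth classes directly to sign a request (SigV4 here; the endpoint, service and region are placeholders, and credentials are assumed to be configured):

import botocore.session
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest

session = botocore.session.get_session()
credentials = session.get_credentials().get_frozen_credentials()
request = AWSRequest(method='GET', url='https://sts.us-east-1.amazonaws.com/')  # placeholder endpoint
SigV4Auth(credentials, 'sts', 'us-east-1').add_auth(request)  # adds the Authorization header in place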
Project: AshsSDK    Author: thehappydinoa
def upload(self, file_name, remote_path):
        """
        Uploads given file to S3
        :param file_name: Path to the file that will be uploaded
        :param remote_path: Remote path at which the file will be uploaded
        :return: VersionId of the latest upload
        """

        if self.prefix and len(self.prefix) > 0:
            remote_path = "{0}/{1}".format(self.prefix, remote_path)

        # Check if a file with same data exists
        if not self.force_upload and self.file_exists(remote_path):
            LOG.debug("File with same data is already exists at {0}. "
                      "Skipping upload".format(remote_path))
            return self.make_url(remote_path)

        try:

            # Default to regular server-side encryption unless customer has
            # specified their own KMS keys
            additional_args = {
                "ServerSideEncryption": "AES256"
            }

            if self.kms_key_id:
                additional_args["ServerSideEncryption"] = "aws:kms"
                additional_args["SSEKMSKeyId"] = self.kms_key_id

            print_progress_callback = \
                ProgressPercentage(file_name, remote_path)
            future = self.transfer_manager.upload(file_name,
                                                  self.bucket_name,
                                                  remote_path,
                                                  additional_args,
                                                  [print_progress_callback])
            future.result()

            return self.make_url(remote_path)

        except botocore.exceptions.ClientError as ex:
            error_code = ex.response["Error"]["Code"]
            if error_code == "NoSuchBucket":
                raise exceptions.NoSuchBucketError(
                        bucket_name=self.bucket_name)
            raise ex