Python boto3 module: client() usage examples

The following 49 code examples, extracted from open-source Python projects, illustrate how to use boto3.client().
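
Each of the examples below follows the same basic pattern: build a client for a service with boto3.client() (or from a boto3.session.Session), then call the service's API operations as methods on that client. Here is a minimal, self-contained sketch of that pattern; the service name, region, and operation are illustrative only and are not taken from any of the projects below.

import boto3

# boto3.client() returns a low-level client bound to one AWS service; the
# first argument names the service, while region and credentials come from
# the usual boto3 configuration chain unless passed explicitly.
s3 = boto3.client('s3', region_name='us-east-1')

# Each API operation of the service is exposed as a method that returns a dict.
response = s3.list_buckets()
for bucket in response['Buckets']:
    print(bucket['Name'])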

Project: aws-consolidated-admin    Author: awslabs    | Project source | File source
def lambda_handler(event, context):
    sess = boto3.session.Session(
        aws_access_key_id=event['Credentials']['AccessKeyId'],
        aws_secret_access_key=decrypt(
            event['Credentials']['SecretAccessKeyCiphertext']),
        aws_session_token=event['Credentials']['SessionToken'],
        region_name=event['Region'])

    cfn = sess.client('cloudformation')

    return cfn.create_stack(
        TemplateURL=event['TemplateURL'],
        StackName=event['Stack']['StackName'],
        Capabilities=event.get('Capabilities', []),
        Parameters=format_parameters(event['Parameters']),
        OnFailure='DO_NOTHING')
Project: deployfish    Author: caltechads    | Project source | File source
def _render_write(self):
        """
        Create a dict of keyword arguments suitable for passing to
        ``boto3.client('ssm').put_parameter()``.

        :rtype: dict
        """
        d = {}
        d['Name'] = "{}.{}.{}".format(self.cluster, self.service, self.key)
        d['Value'] = self.value
        d['Overwrite'] = True
        if self.is_secure:
            d['Type'] = 'SecureString'
            if self.kms_key_id:
                d['KeyId'] = self.kms_key_id
        else:
            d['Type'] = 'String'
        return d
Project: aws-consolidated-admin    Author: awslabs    | Project source | File source
def lambda_handler(event, context):
    sess = boto3.session.Session(
        aws_access_key_id=event['Credentials']['AccessKeyId'],
        aws_secret_access_key=decrypt(
            event['Credentials']['SecretAccessKeyCiphertext']),
        aws_session_token=event['Credentials']['SessionToken'],
        region_name=event['Region'])

    cfn = sess.client('cloudformation')

    try:
        resp = cfn.update_stack(
            TemplateURL=event['TemplateURL'],
            StackName=event['Stack']['StackName'],
            Capabilities=event.get('Capabilities', []),
            Parameters=format_parameters(event['Parameters']))
    except botocore.exceptions.ClientError as e:
        if str(e).endswith('No updates are to be performed.'):
            return {'Warning': 'NOTHING_TO_UPDATE'}

        raise

    resp['Warning'] = 'NONE' # Ew

    return resp
Project: greeter-service-example    Author: daniel-rhoades    | Project source | File source
def __init__(self, module):
        self.module = module

        try:
            # self.ecs = boto3.client('ecs')
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - "+str(e))

    # def list_clusters(self):
    #     return self.client.list_clusters()
    # {'failures': [],
    #  'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
    #  'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
    # {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
    #  'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
    #  'clusters': []}
Project: aws-greengrass-mini-fulfillment    Author: awslabs    | Project source | File source
def _create_lambda(arn, func_name, func_desc, lambda_handler, lambda_main,
                   runtime):
    func = dict()
    lamb = boto3.client('lambda')
    with open(temp_deploy_zip, 'rb') as deploy:
        func['ZipFile'] = deploy.read()
    try:
        resp = lamb.create_function(
            FunctionName=func_name, Runtime=runtime, Publish=True,
            Description=func_desc,
            Role=arn, Code=func, Handler='{0}.{1}'.format(
                lambda_main, lambda_handler
            ))
        logging.info("Create Lambda Function resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating function '{1}'.".format(
                ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
Project: aws-greengrass-mini-fulfillment    Author: awslabs    | Project source | File source
def _create_function_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')

    try:
        resp = lamb.create_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        logging.info("Create Lambda Alias resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating alias '{1}'.".format(
                ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
Project: aws-greengrass-mini-fulfillment    Author: awslabs    | Project source | File source
def _update_lambda_function(zip_file, func_name):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_function_code(
            FunctionName=func_name,
            ZipFile=zip_file.read(),
            Publish=True
        )
        return resp['Version']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning(
                "Validation Error {0} updating function '{1}'.".format(
                    ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
Project: AWS-SG-IP-Updater    Author: prasket    | Project source | File source
def add_ip(current_ip, sg_id, port, protocol):
    """Add current IP to the security group"""

    # setup client for ec2
    client = boto3.client("ec2")

    # execute security group ingress Boto3 commands
    # TODO: Add in try for graceful error handling
    response = client.authorize_security_group_ingress(
        GroupId=sg_id,
        IpProtocol=protocol,
        FromPort=port,
        ToPort=port,
        CidrIp=current_ip
    )
    print(response)
Project: AWS-SG-IP-Updater    Author: prasket    | Project source | File source
def remove_ip(current_ip, sg_id, port, protocol):
    """remove current IP from the security group"""

    # setup client for ec2
    client = boto3.client("ec2")

    # execute security group revoke ingress Boto3 commands
    response = client.revoke_security_group_ingress(
        GroupId=sg_id,
        IpProtocol=protocol,
        FromPort=port,
        ToPort=port,
        CidrIp=current_ip
    )
    print(response)

# Define the usage of the app
Project: sm-engine-ansible    Author: METASPACE2020    | Project source | File source
def send_to_sns(subject, message):
    if aws_sns_arn is None:
        return

    print("Sending notification to: %s" % aws_sns_arn)

    client = boto3.client('sns')

    response = client.publish(
        TargetArn=aws_sns_arn,
        Message=message,
        Subject=subject)

    if 'MessageId' in response:
        print("Notification sent with message id: %s" % response['MessageId'])
    else:
        print("Sending notification failed with response: %s" % str(response))
Project: sm-engine-ansible    Author: METASPACE2020    | Project source | File source
def send_to_sns(subject, message):
    if aws_sns_arn is None:
        return

    print("Sending notification to: %s" % aws_sns_arn)

    client = boto3.client('sns')

    response = client.publish(
        TargetArn=aws_sns_arn,
        Message=message,
        Subject=subject)

    if 'MessageId' in response:
        print("Notification sent with message id: %s" % response['MessageId'])
    else:
        print("Sending notification failed with response: %s" % str(response))
Project: deployfish    Author: caltechads    | Project source | File source
def __init__(self, clusterName, yml={}):
        """
        :param clusterName: the name of the cluster in which we'll run our
                            helper tasks
        :type clusterName: string

        :param yml: the task definition information for the task from our
                    deployfish.yml file
        :type yml: dict
        """
        self.clusterName = clusterName
        self.ecs = boto3.client('ecs')
        self.commands = {}
        self.from_yaml(yml)
        self.desired_task_definition = TaskDefinition(yml=yml)
        self.active_task_definition = None
Project: deployfish    Author: caltechads    | Project source | File source
def new(service, cluster, yml=None):
        """
        Returns a list of parameters.
        :param service:
        :param cluster:
        :param yml:
        :return: list
        """
        if yml:
            m = ParameterFactory.WILDCARD_RE.search(yml)
            if m:
                parameter_list = []
                ssm = boto3.client('ssm')
                response = ssm.describe_parameters(Filters=[{'Key': 'Name', 'Values': [m.group('key')]}], MaxResults=50)
                parms = response['Parameters']
                for parm in parms:
                    if parm['Type'] == 'SecureString':
                        line = "{}:external:secure:{}".format(parm['Name'], parm['KeyId'])
                    else:
                        line = "{}:external".format(parm['Name'])
                    parameter_list.append(Parameter(service, cluster, yml=line))
                return parameter_list

        return [Parameter(service, cluster, yml=yml)]
Project: aws-tailor    Author: alanwill    | Project source | File source
def cleanup_resources(la_credentials, regions):

    # Clean up resources
    try:
        for region in regions:
            laCloudtrail = boto3.client(
                'cloudtrail',
                region_name=region,
                aws_access_key_id=la_credentials[0],
                aws_secret_access_key=la_credentials[1],
                aws_session_token=la_credentials[2],
            )
            describeTrail = laCloudtrail.describe_trails()
            for trail in describeTrail['trailList']:
                deleteTrail = laCloudtrail.delete_trail(
                    Name=trail['TrailARN']
                )
                print(deleteTrail)
    except Exception as e:
        print(e)
        print("No trails to delete")

    return
Project: aws-tailor    Author: alanwill    | Project source | File source
def check_trails(la_credentials, s3_bucket):

    laCloudtrail = boto3.client(
        'cloudtrail',
        region_name='us-east-1',
        aws_access_key_id=la_credentials[0],
        aws_secret_access_key=la_credentials[1],
        aws_session_token=la_credentials[2],
    )

    checkTrail = laCloudtrail.describe_trails(
        trailNameList=['default'],
    )

    if len(checkTrail['trailList']) == 1 \
            and checkTrail['trailList'][0]['IsMultiRegionTrail'] is True \
            and checkTrail['trailList'][0]['S3BucketName'] == s3_bucket:
        return True
    else:
        return False
Project: aws-tailor    Author: alanwill    | Project source | File source
def get_la_vpc_id(la_credentials, region):

    # Lookup vpcid
    laCfn = boto3.client(
        'cloudformation',
        region_name=region,
        aws_access_key_id=la_credentials[0],
        aws_secret_access_key=la_credentials[1],
        aws_session_token=la_credentials[2],
    )
    # Look up CFN stack Outputs
    getStack = laCfn.describe_stacks(
        StackName='core'
    )

    # Extract vpc id
    laVpcId = None
    cfnOutput = getStack['Stacks'][0]['Outputs']
    for i in cfnOutput:
        if i['OutputKey'] == "VPC":
            laVpcId = i['OutputValue']
        else:
            continue

    return laVpcId
Project: cfpp    Author: dcoker    | Project source | File source
def test_kms(self):
        if "CFPP_RUN_KMS_TESTS" not in os.environ:
            return

        import boto3
        import botocore

        output = subprocess.check_output(["cfpp", "-s", "tests",
                                          "tests/kms_test.template"])
        parsed = json.loads(output)["Parameters"]
        without_context = parsed["EncryptedValue"]["Default"]
        with_context = parsed["EncryptedValueWithContext"]["Default"]

        kms = boto3.client('kms')
        kms.decrypt(CiphertextBlob=base64.b64decode(without_context))
        try:
            kms.decrypt(CiphertextBlob=with_context)
            self.fail("expected KMS to fail due to lack of context")
        except botocore.exceptions.ClientError:
            pass

        kms.decrypt(CiphertextBlob=base64.b64decode(with_context),
                    EncryptionContext={"ContextKey": "ContextValue"})
Project: Falco    Author: VCCRI    | Project source | File source
def set_mapper_number(manifest_file):
    fastq_counts = 0

    if manifest_file.startswith("s3://"):
        s3 = boto3.resource("s3")

        bucket_name, key_prefix = manifest_file.strip().strip("/")[5:].split("/", 1)

        with tempfile.TemporaryDirectory() as tmpdirname:
            s3.meta.client.download_file(bucket_name, key_prefix, tmpdirname + "/manifest")

            for line in open(tmpdirname+"/manifest"):
                fastq_counts += 1
    else:
        for line in open(manifest_file):
            fastq_counts += 1

    return fastq_counts
Project: Falco    Author: VCCRI    | Project source | File source
def upload_files_to_s3(file_list, dry_run=False):
    """
    uploads files to an AWS S3 bucket
    :param file_list: list of files to be uploaded
    :param dry_run: a boolean flag; if True, no files are actually uploaded
    :return: a comma-separated list of uploaded file paths
    """
    s3_client = boto3.client("s3")
    uploaded_files = []

    for name, local_dir, s3_dest in file_list:
        file_location = local_dir.rstrip("/") + "/" + name
        bucket_name, key_prefix = s3_dest.strip().strip("/")[5:].split("/", 1)

        if not dry_run:
            s3_client.upload_file(file_location, bucket_name, key_prefix + "/" + name)

        uploaded_files.append(s3_dest.rstrip("/") + "/" + name)

    return ",".join(uploaded_files)
Project: Falco    Author: VCCRI    | Project source | File source
def is_valid_s3_bucket(s3_string):
    """
    Determine if the input string starts with a valid s3 bucket name
    :param s3_string: an aws s3 address (e.g. s3://mybucket/other...)
    :return: True if the s3_string contains a valid bucket name
    """
    client = boto3.client('s3')
    # only applies to s3 - so ignore otherwise
    if s3_string[0:5] != 's3://':
        return False
    # get the bucket name
    bucket = s3_string[5:].strip('/').split('/')[0]
    if not bucket:
        return False
    # see if bucket exists
    try:
        client.list_objects(Bucket=bucket)
    except:
        return False

    return True
Project: Falco    Author: VCCRI    | Project source | File source
def check_s3_file_exists(s3_path, file_name):
    """
    Determine if a s3 key exists
    :param s3_path: an s3 "directory" path (e.g. s3://mybucket/name/)
    :param file_name: a pathless file name (e.g. myfile.txt)
    :return: True if key exists; False otherwise
    """
    full_path = s3_path.rstrip('/') + '/' + file_name
    bucket_name, key_prefix = full_path[5:].split("/", 1)
    client = boto3.client('s3')
    # see if file exists
    try:
        client.get_object(Bucket=bucket_name, Key=key_prefix)
    except:
        return False

    return True
Project: aws-waf-security-automation    Author: cerbo    | Project source | File source
def waf_get_ip_set(ip_set_id):
    response = None
    waf = boto3.client('waf')

    for attempt in range(API_CALL_NUM_RETRIES):
        try:
            response = waf.get_ip_set(IPSetId=ip_set_id)
        except Exception as e:
            print(e)
            delay = math.pow(2, attempt)
            print("[waf_get_ip_set] Retrying in %d seconds..." % (delay))
            time.sleep(delay)
        else:
            break
    else:
        print("[waf_get_ip_set] Failed ALL attempts to call API")

    return response
Project: aws-waf-security-automation    Author: cerbo    | Project source | File source
def waf_update_ip_set(ip_set_id, updates_list):
    response = None

    if updates_list != []:
        waf = boto3.client('waf')
        for attempt in range(API_CALL_NUM_RETRIES):
            try:
                response = waf.update_ip_set(IPSetId=ip_set_id,
                    ChangeToken=waf.get_change_token()['ChangeToken'],
                    Updates=updates_list)
            except Exception as e:
                delay = math.pow(2, attempt)
                print("[waf_update_ip_set] Retrying in %d seconds..." % (delay))
                time.sleep(delay)
            else:
                break
        else:
            print("[waf_update_ip_set] Failed ALL attempts to call API")

    return response
Project: aws-waf-security-automation    Author: cerbo    | Project source | File source
def waf_update_ip_set(ip_set_id, source_ip):
    waf = boto3.client('waf')
    for attempt in range(API_CALL_NUM_RETRIES):
        try:
            response = waf.update_ip_set(IPSetId=ip_set_id,
                ChangeToken=waf.get_change_token()['ChangeToken'],
                Updates=[{
                    'Action': 'INSERT',
                    'IPSetDescriptor': {
                        'Type': 'IPV4',
                        'Value': "%s/32"%source_ip
                    }
                }]
            )
        except Exception as e:
            delay = math.pow(2, attempt)
            print("[waf_update_ip_set] Retrying in %d seconds..." % delay)
            time.sleep(delay)
        else:
            break
    else:
        print("[waf_update_ip_set] Failed ALL attempts to call API")
Project: aws-waf-security-automation    Author: cerbo    | Project source | File source
def remove_s3_bucket_lambda_event(bucket_name, lambda_function_arn):
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')
    try:
        new_conf = {}
        notification_conf = s3_client.get_bucket_notification_configuration(Bucket=bucket_name)
        if 'TopicConfigurations' in notification_conf:
            new_conf['TopicConfigurations'] = notification_conf['TopicConfigurations']
        if 'QueueConfigurations' in notification_conf:
            new_conf['QueueConfigurations'] = notification_conf['QueueConfigurations']

        if 'LambdaFunctionConfigurations' in notification_conf:
            new_conf['LambdaFunctionConfigurations'] = []
            for lfc in notification_conf['LambdaFunctionConfigurations']:
                if lfc['LambdaFunctionArn'] == lambda_function_arn:
                    continue #remove all references for Log Parser event
                else:
                    new_conf['LambdaFunctionConfigurations'].append(lfc)

        response = s3_client.put_bucket_notification_configuration(Bucket=bucket_name, NotificationConfiguration=new_conf)

    except Exception as e:
        print(e)
        print("[ERROR] Failed to remove the S3 bucket Lambda event")
Project: aws-waf-security-automation    Author: cerbo    | Project source | File source
def can_delete_rule(stack_name, rule_id):
    result = False
    for attempt in range(API_CALL_NUM_RETRIES):
        try:
            waf = boto3.client('waf')
            rule_detail = waf.get_rule(RuleId=rule_id)
            result = (stack_name is None or (rule_detail['Rule']['Name'].startswith(stack_name + " - ") and rule_detail['Rule']['Name'] != (stack_name + " - Whitelist Rule")))
        except Exception as e:
            print(e)
            delay = math.pow(2, attempt)
            print("[can_delete_rule] Retrying in %d seconds..." % (delay))
            time.sleep(delay)
        else:
            break
    else:
        print("[can_delete_rule] Failed ALL attempts to call API")

    return result
Project: adefa    Author: butomo1989    | Project source | File source
def group(name, project, device):
    """
    Create a device group / pool.
    :param name: group name
    :param project: project id
    :param device: device id
    """
    device_str = '['
    for pos, item in enumerate(device):
        device_str += '"{item}"'.format(item=item)
        if pos == len(device) - 1:
            device_str += ']'
        else:
            device_str += ', '
    rules = [{'attribute': 'ARN', 'operator': 'IN', 'value': device_str}]
    res = client.create_device_pool(name=name, projectArn=project, rules=rules)
    print(res.get('devicePool').get('arn'))
Project: NotHotDog    Author: ryanml    | Project source | File source
def get_confidence(self, file):
        confidence = 0.0
        response = self.client.detect_labels(
            Image={
                'Bytes': self.get_bytes(self.uploads_dir + file)
            },
            MaxLabels=self.max_labels,
            MinConfidence=self.min_conf
        )
        labels = response['Labels']
        for label in labels:
            if label['Name'] == self.hd_label:
                confidence = label['Confidence']
                confidence = "%.2f" % float(confidence)
                break
        return confidence
Project: pg2kinesis    Author: handshake    | Project source | File source
def __init__(self, stream_name, back_off_limit=60, send_window=13):
        self.stream_name = stream_name
        self.back_off_limit = back_off_limit
        self.last_send = 0

        self._kinesis = boto3.client('kinesis')
        self._sequence_number_for_ordering = '0'
        self._record_agg = aws_kinesis_agg.aggregator.RecordAggregator()
        self._send_window = send_window

        try:
            self._kinesis.create_stream(StreamName=stream_name, ShardCount=1)
        except ClientError as e:
            # ResourceInUseException is raised when the stream already exists
            if e.response['Error']['Code'] != 'ResourceInUseException':
                logger.error(e)
                raise

        waiter = self._kinesis.get_waiter('stream_exists')

        # waits up to 180 seconds for stream to exist
        waiter.wait(StreamName=self.stream_name)
Project: albt    Author: geothird    | Project source | File source
def __update__(self):
        """
        Update function code and properties
        :return:
        """
        self.__details__()
        self.__zip_function__()
        response = self.client.update_function_code(
            FunctionName=self.function_name,
            ZipFile=self.__read_zip__()
        )
        if self.debug:
            PrintMsg.out(response)
        PrintMsg.cmd('Sha256: {}'.format(
            response['CodeSha256']), 'UPDATED CODE')
        self.__delete_zip__()
        if not self.dry:
            response = self.client.update_function_configuration(
                **self.config)
            if self.debug:
                PrintMsg.out(response)
            PrintMsg.cmd('Sha256: {}'.format(
                response['CodeSha256']), 'UPDATED CONFIG')
Project: zinc    Author: PressLabs    | Project source | File source
def destroy(self, dry_run=False):
        changes = []
        for record in self.records['ResourceRecordSets']:
            if record['Name'] == self.zone['Name'] and record['Type'] in ['NS', 'SOA']:
                continue
            changes.append({
                'Action': 'DELETE',
                'ResourceRecordSet': record
            })

        print('{} {} ({})'.format('Deleting' if dry_run else 'Will delete',
                                  self.zone['Name'], self.zone_id))
        if not dry_run:
            if changes:
                client.change_resource_record_sets(HostedZoneId=self.zone_id,
                                                   ChangeBatch={
                                                       'Changes': changes
                                                   })
            client.delete_hosted_zone(Id=self.zone_id)
Project: aws-certificate-management    Author: ImmobilienScout24    | Project source | File source
def setup_bucket_policy(cls):
        sts_client = boto3.client('sts', region_name='eu-west-1')
        account_id = sts_client.get_caller_identity()['Account']
        policy_document = {
            "Version": "2008-10-17",
            "Statement": [
                {
                    "Sid": "GiveSESPermissionToWriteEmail",
                    "Effect": "Allow",
                    "Principal": {
                        "Service": "ses.amazonaws.com"
                    },
                    "Action": "s3:PutObject",
                    "Resource": "arn:aws:s3:::{0}/*".format(cls.s3_bucket),
                    "Condition": {
                        "StringEquals": {
                            "aws:Referer": account_id
                        }
                    }
                }
            ]
        }
        s3 = boto3.resource('s3')
        policy = s3.BucketPolicy(cls.s3_bucket)
        policy.put(Policy=json.dumps(policy_document))
Project: api-manager    Author: edx    | Project source | File source
def create_apigw_custom_domain_name(domain_name, cert_name, cert_body, cert_pk, cert_chain):
    """Creates an api gateway custom domain entity"""

    client = boto3.client('apigateway', region_name=args.aws_region)

    try:
        response = client.create_domain_name(
            domainName=domain_name,
            certificateName=cert_name,
            certificateBody=cert_body,
            certificatePrivateKey=cert_pk,
            certificateChain=cert_chain
        )
    except Exception as e:
        raise e

    return response
Project: api-manager    Author: edx    | Project source | File source
def bootstrap_api(stage_name):
    """
    Upload a bootstrap Swagger document to a new API Gateway object and set it live
    with environment-specific variables.
    """

    client = boto3.client('apigateway', region_name=args.aws_region)

    # bootstrap.json is relative to me; where am I?
    my_dir = os.path.dirname(os.path.realpath(__file__))

    bootstrap_swagger = open(my_dir + '/bootstrap.json', 'r')

    response = client.import_rest_api(body=bootstrap_swagger.read())
    logging.info('New bootstrap API ID "%s" created', response['id'])

    client.create_deployment(
        restApiId=response['id'],
        stageName=stage_name)
    logging.info('API ID "%s" deployed to stage "%s"', response['id'], stage_name)

    return response['id']
Project: pfb-network-connectivity    Author: azavea    | Project source | File source
def main():

    parser = argparse.ArgumentParser(description=help)
    parser.add_argument('job_definition_filename', type=str)
    parser.add_argument('image_url', type=str)

    parser.add_argument('--deregister', action='store_true',
                        help='Deregister the old version of the job definition after updating')
    args = parser.parse_args()

    path_to_config_json = os.path.join('.', 'job-definitions',
                                       args.job_definition_filename)
    with open(path_to_config_json, 'r') as json_file:
        job_definition = json.load(json_file)
        job_definition['containerProperties']['image'] = args.image_url

        client = boto3.client('batch')
        response = client.register_job_definition(**job_definition)

        if args.deregister:
            old_revision = int(response['revision']) - 1
            old_job_definition = '{}:{}'.format(response['jobDefinitionName'], old_revision)
            client.deregister_job_definition(jobDefinition=old_job_definition)

        print('{}:{}'.format(response['jobDefinitionName'], response['revision']), end='')
Project: pfb-network-connectivity    Author: azavea    | Project source | File source
def get_latest_job_definition(job_definition_name):
    """ Get the latest revision of an AWS Batch job definition

    Raises NoActiveJobDefinitionRevision if no current active revision for the
        requested job definition

    """
    client = boto3.client('batch')
    response = client.describe_job_definitions(jobDefinitionName=job_definition_name,
                                               status='ACTIVE')
    job_definitions = response.get('jobDefinitions', [])
    while response.get('nextToken') is not None:
        response = client.describe_job_definitions(jobDefinitionName=job_definition_name,
                                                   status='ACTIVE',
                                                   nextToken=response['nextToken'])
        job_definitions.extend(response.get('jobDefinitions', []))
    sorted_definitions = sorted(job_definitions, key=lambda job: job['revision'])
    try:
        return sorted_definitions.pop()
    except IndexError:
        raise NoActiveJobDefinitionRevision(job_definition=job_definition_name)
Project: pfb-network-connectivity    Author: azavea    | Project source | File source
def cancel(self, reason=None):
        """ Cancel the analysis job, if its running """
        if not reason:
            reason = 'AnalysisJob terminated by user at {}'.format(datetime.utcnow())

        if self.status in self.Status.ACTIVE_STATUSES:
            logger.info('Cancelling job: {}'.format(self))
            old_status = self.status
            self.update_status(self.Status.CANCELLED)
            if self.batch_job_id is not None:
                try:
                    client = boto3.client('batch')
                    client.terminate_job(jobId=self.batch_job_id, reason=reason)
                except:
                    self.update_status(old_status,
                                       'REVERTED',
                                       'Reverted due to failure cancelling job in AWS Batch')
                    raise
Project: sync-buckets-state-machine    Author: awslabs    | Project source | File source
def check_bucket(bucket):
    s3 = boto3.client('s3', region_name=AWS_DEFAULT_REGION)

    print('Checking bucket: ' + bucket)
    try:
        s3.head_bucket(Bucket=bucket)
    except ClientError:
        print('Creating bucket: ' + bucket)
        args = {
            'Bucket': bucket
        }
        if AWS_DEFAULT_REGION != 'us-east-1':
            args['CreateBucketConfiguration'] = {
                'LocationConstraint': AWS_DEFAULT_REGION
            }
        s3.create_bucket(**args)
        waiter = s3.get_waiter('bucket_exists')
        waiter.wait(Bucket=bucket)
Project: sync-buckets-state-machine    Author: awslabs    | Project source | File source
def get_timestamp_from_s3_object(bucket, key):
    s3 = boto3.client('s3', region_name=AWS_DEFAULT_REGION)

    try:
        response = s3.get_object(
            Bucket=bucket,
            Key=key
        )
        timestamp = response['LastModified']  # We assume this is UTC.
    except ClientError:
        timestamp = datetime(1970, 1, 1, tzinfo=None)

    return (timestamp.replace(tzinfo=None) - datetime(1970, 1, 1, tzinfo=None)).total_seconds()


# IAM
Project: sync-buckets-state-machine    Author: awslabs    | Project source | File source
def get_arn_from_policy_name(policy_name):
    iam = boto3.client('iam', region_name=AWS_DEFAULT_REGION)

    args = {
        'Scope': 'All'
    }
    while True:
        response = iam.list_policies(**args)
        for p in response['Policies']:
            if p['PolicyName'] == policy_name:
                return p['Arn']
        if response['IsTruncated']:
            args['Marker'] = response['Marker']
        else:
            return None


# Lambda
Project: sync-buckets-state-machine    Author: awslabs    | Project source | File source
def execute_cfn_change_set(change_set_id):
    cfn = boto3.client('cloudformation', region_name=AWS_DEFAULT_REGION)

    print('Executing CloudFormation change set...')
    cfn.execute_change_set(ChangeSetName=change_set_id)

    while True:
        response = get_cfn_stack_info()
        if response is None:
            status = 'UNKNOWN'
        else:
            status = response.get('StackStatus', 'UNKNOWN')

        print('Status: ' + status)
        if response and 'StatusReason' in response:
            print('Reason: ' + response['StatusReason'])
        if status.endswith('FAILED') or status == 'ROLLBACK_COMPLETE':
            exit(1)
        elif status == 'UNKNOWN':
            print('Stack info:\n' + json.dumps(response, sort_keys=True, indent=4, default=str))
        elif status.endswith('COMPLETE'):
            return

        time.sleep(SLEEP_TIME)
Project: CAL    Author: HPCC-Cloud-Computing    | Project source | File source
def create(self, name, cidr, **kargs):

        # step1: create vpc
        vpc = self.client.create_vpc(
            CidrBlock=cidr,
            InstanceTenancy='default'
        ).get('Vpc')
        # step 2: create subnet
        subnet = self.client.create_subnet(
            VpcId=vpc.get('VpcId'),
            CidrBlock=cidr
        ).get('Subnet')

        result = {'name': subnet['SubnetId'],
                  'description': None,
                  'id': subnet['SubnetId'],
                  'cidr': subnet['CidrBlock'],
                  'cloud': PROVIDER,
                  'gateway_ip': None,
                  'security_group': None,
                  'allocation_pools': None,
                  'dns_nameservers': None
                  }

        return result
Project: CAL    Author: HPCC-Cloud-Computing    | Project source | File source
def show(self, subnet_id):
        subnet = self.client.describe_subnets(
            SubnetIds=[subnet_id]).get('Subnets')[0]

        result = {'name': subnet['SubnetId'],
                  'description': None,
                  'id': subnet['SubnetId'],
                  'cidr': subnet['CidrBlock'],
                  'cloud': PROVIDER,
                  'gateway_ip': None,
                  'security_group': None,
                  'allocation_pools': None,
                  'dns_nameservers': None
                  }

        return result
Project: CAL    Author: HPCC-Cloud-Computing    | Project source | File source
def list(self, **search_opts):
        subnets = self.client.describe_subnets(**search_opts).get('Subnets')
        result = []
        for subnet in subnets:
            sub = {'name': subnet['SubnetId'],
                   'description': None,
                   'id': subnet['SubnetId'],
                   'cidr': subnet['CidrBlock'],
                   'cloud': PROVIDER,
                   'gateway_ip': None,
                   'security_group': None,
                   'allocation_pools': None,
                   'dns_nameservers': None
                   }
            result.append(sub)

        return result
Project: CAL    Author: HPCC-Cloud-Computing    | Project source | File source
def copy_object(self, container, obj, metadata=None,
                    destination=None, **kwargs):
        copysource = {
            'Bucket': container,
            'Key': obj
        }

        if destination:
            metadata_directive = 'COPY'
            dst_container, dst_obj = destination.strip('/').split('/')
        else:
            metadata_directive = 'REPLACE'
            dst_container, dst_obj = container, obj
        if not metadata:
            metadata = {}
        return self.client.copy_object(Bucket=dst_container, Key=dst_obj,
                                       Metadata=metadata,
                                       MetadataDirective=metadata_directive,
                                       CopySource=copysource)
Project: ThreatPrep    Author: ThreatResponse    | Project source | File source
def __init__(self, region='us-east-1'):
        self.check_categories = ['S3','IAM', 'VPC', 'CloudWatch', 'CloudTrail']
        self.ec2 = boto3.resource("ec2", region_name=region)
        self.ec2_client = boto3.client("ec2", region_name=region)
        self.cloudwatch = boto3.resource("cloudwatch", region_name=region)
        self.cloudwatch_client = boto3.client("cloudwatch", region_name=region)
        self.cloudtrail_client = boto3.client('cloudtrail', region_name=region)
        self.iam = boto3.resource("iam", region_name=region)
        self.iam_client = boto3.client("iam", region_name=region)
        self.s3 = boto3.resource("s3", region_name=region)

        self.results = []
        self.results_dict = {}
Project: ThreatPrep    Author: ThreatResponse    | Project source | File source
def check_vpcs(self):
        #collect vpc ids
        regions = get_regions()
        for region in regions:
            ec2 = boto3.resource('ec2', region_name=region)
            ec2_client = boto3.client('ec2', region_name=region)
            ids = [ x.id for x in ec2.vpcs.all() ]
            flowlogs = self.get_flowlogs_by_vpc_id(ec2_client)

            for vpc_id in ids:
                vpc_dict = flowlogs.get(vpc_id, None)
                self.append_collection(
                    misc_checks.VPCFlowLogCheck(vpc_id, vpc_dict)
                    )
Project: ThreatPrep    Author: ThreatResponse    | Project source | File source
def get_regions():
    client = boto3.client('ec2', region_name='us-east-1')
    regions = [ x['RegionName'] for x in client.describe_regions()['Regions']]
    return regions
Project: aws-consolidated-admin    Author: awslabs    | Project source | File source
def lambda_handler(event, context):
    sess = boto3.session.Session(
        aws_access_key_id=event['Credentials']['AccessKeyId'],
        aws_secret_access_key=decrypt(
            event['Credentials']['SecretAccessKeyCiphertext']),
        aws_session_token=event['Credentials']['SessionToken'],
        region_name=event['Region'])

    cfn = sess.client('cloudformation')

    resp = cfn.delete_stack(StackName=event['Stack']['StackId'])

    return {
        'RequestId': resp['ResponseMetadata']['RequestId']
    }