Python django.conf.settings module: AWS_REGION code examples

The following 13 code examples, extracted from open-source Python projects, illustrate how to use django.conf.settings.AWS_REGION.
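
All of the snippets below assume a Django settings module that defines AWS_REGION (and, in most of these projects, matching credential settings). A minimal sketch of such a settings block, assuming the values come from environment variables:

# settings.py -- minimal sketch; reading from environment variables is an
# assumption, as is the 'us-east-1' default.
import os

AWS_REGION = os.environ.get('AWS_REGION', 'us-east-1')
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', '')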

Project: pfb-network-connectivity    Author: azavea
def base_environment(self):
        """ Convenience method for copying the environment to hand to batch jobs """

        # Since we run django manage commands in the analysis container, it needs a copy of
        # all the environment variables that this app needs, most of which are conveniently
        # prefixed with 'PFB_'
        # Set these first so they can be overridden by job specific settings below
        environment = {key: val for (key, val) in os.environ.items()
                       if key.startswith('PFB_') and val is not None}
        # For the ones without the 'PFB_' prefix, send the settings rather than the original
        # environment variables because the environment variables might be None, which is not
        # acceptable as a container override environment value, but the settings values will be set
        # to whatever they default to in settings.
        environment.update({
            'DJANGO_ENV': settings.DJANGO_ENV,
            'DJANGO_LOG_LEVEL': settings.DJANGO_LOG_LEVEL,
            'AWS_DEFAULT_REGION': settings.AWS_REGION,
        })
        return environment
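
This project runs its analyses on AWS Batch (see the logs_url example below), so the returned dict is presumably converted into the name/value list that Batch expects. A hedged sketch of that hand-off; the job name, queue, and definition are hypothetical:

import boto3
from django.conf import settings

def submit_batch_job(self):
    # Convert the dict into the [{'name': ..., 'value': ...}] list that
    # AWS Batch containerOverrides expects (job identifiers hypothetical).
    client = boto3.client('batch', region_name=settings.AWS_REGION)
    environment = [{'name': key, 'value': value}
                   for key, value in self.base_environment().items()]
    return client.submit_job(
        jobName='pfb-analysis',
        jobQueue='pfb-analysis-queue',
        jobDefinition='pfb-analysis-job-definition',
        containerOverrides={'environment': environment},
    )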

Project: beekeeper    Author: pybee
def stop(self, aws_session=None, ecs_client=None):
        if ecs_client is None:
            if aws_session is None:
                aws_session = boto3.session.Session(
                    region_name=settings.AWS_REGION,
                    aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                    aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                )

            ecs_client = aws_session.client('ecs')

        response = ecs_client.stop_task(
            cluster=settings.AWS_ECS_CLUSTER_NAME,
            task=self.arn
        )
        self.status = Task.STATUS_STOPPING
        self.save()
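
The optional aws_session and ecs_client parameters allow dependency injection; given a Task instance task, a hypothetical test stub shows the intent:

# Hypothetical test usage: injecting a stub avoids creating a real
# boto3 session and hitting AWS.
class StubEcsClient(object):
    def stop_task(self, cluster, task):
        return {'task': {'taskArn': task}}

task.stop(ecs_client=StubEcsClient())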

Project: pfb-network-connectivity    Author: azavea
def logs_url(self):
        url = ('https://console.aws.amazon.com/cloudwatch/home?region={aws_region}' +
               '#logStream:group=/aws/batch/job;prefix={batch_job_name}/{batch_job_id}' +
               ';streamFilter=typeLogStreamPrefix')
        return url.format(aws_region=settings.AWS_REGION,
                          batch_job_name=self.analysis_job_name,
                          batch_job_id=self.batch_job_id)

Project: videofront    Author: openfun
def s3_client(self):
        if self._s3_client is None:
            self._s3_client = self.session.client('s3', region_name=settings.AWS_REGION)
        return self._s3_client

Project: videofront    Author: openfun
def elastictranscoder_client(self):
        if self._elastictranscoder_client is None:
            self._elastictranscoder_client = self.session.client('elastictranscoder', region_name=settings.AWS_REGION)
        return self._elastictranscoder_client
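
Both properties cache their client on a Backend instance and share a single boto3 session; a minimal sketch of that surrounding class (the two snippets above would be @property methods on it), with the constructor details assumed:

import boto3
from django.conf import settings

class Backend(object):
    def __init__(self):
        # One session shared by the lazily-created service clients above.
        self.session = boto3.session.Session(
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
        )
        self._s3_client = None
        self._elastictranscoder_client = None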

Project: videofront    Author: openfun
def _get_download_base_url(self):
        cloudfront = getattr(settings, 'CLOUDFRONT_DOMAIN_NAME', None)
        if cloudfront:
            # Download from cloudfront
            return "https://{domain}".format(domain=cloudfront)
        else:
            return "https://s3-{region}.amazonaws.com/{bucket}".format(
                region=settings.AWS_REGION,
                bucket=settings.S3_BUCKET,
            )
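
For concreteness, given a Backend instance backend with hypothetical values AWS_REGION = 'eu-west-1' and S3_BUCKET = 'videos', and no CLOUDFRONT_DOMAIN_NAME set, the fallback branch produces:

>>> backend._get_download_base_url()
'https://s3-eu-west-1.amazonaws.com/videos'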

Project: videofront    Author: openfun
def create_bucket(self, bucket_name, acl):
        backend = Backend()
        try:
            backend.s3_client.head_bucket(Bucket=bucket_name)
            self.stdout.write("Bucket {} already exists".format(bucket_name))
        except ClientError:
            self.stdout.write("Creating bucket {}...".format(bucket_name))
            backend.s3_client.create_bucket(
                ACL=acl,
                Bucket=bucket_name,
                CreateBucketConfiguration={
                    'LocationConstraint': settings.AWS_REGION
                }
            )
            self.stdout.write("Updating CORS configuration...")
            backend.s3_client.put_bucket_cors(
                Bucket=bucket_name,
                CORSConfiguration={
                    'CORSRules': [
                        {
                            'AllowedHeaders': [
                                '*',
                            ],
                            'AllowedMethods': [
                                'GET', 'PUT',
                            ],
                            'AllowedOrigins': [
                                '*',
                            ],
                            'MaxAgeSeconds': 3000
                        },
                    ]
                }
            )
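
The self.stdout.write calls imply this is a method on a Django management command; a sketch of the surrounding Command class, with the argument names assumed:

from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = "Create an S3 bucket with CORS rules (sketch; names assumed)"

    def add_arguments(self, parser):
        parser.add_argument('bucket_name')
        parser.add_argument('--acl', default='private')

    def handle(self, *args, **options):
        self.create_bucket(options['bucket_name'], options['acl'])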

Project: odl-video-service    Author: mitodl
def get_transcoder_client():
    """
    Get an ElasticTranscoder client object

    Returns:
        botocore.client.ElasticTranscoder:
            An ElasticTranscoder client object
    """
    return boto3.client('elastictranscoder', settings.AWS_REGION)
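
Usage sketch: the returned client exposes the full Elastic Transcoder API, for example reading the pipeline configured elsewhere in this project:

from django.conf import settings

client = get_transcoder_client()
pipeline = client.read_pipeline(Id=settings.ET_PIPELINE_ID)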

Project: odl-video-service    Author: mitodl
def handle(self, *args, **options):
        """
        Run the command
        """
        with open(options['filejson']) as filejson:
            presets = json.load(filejson)

        client = boto3.client('elastictranscoder',
                              region_name=settings.AWS_REGION,
                              aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                              aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
        for preset in presets:
            preset['created'] = client.create_preset(**preset)
        self.stdout.write('ET_PRESET_IDS={}'.format(','.join(
            [preset['created']['Preset']['Id'] for preset in presets])))
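
A hedged usage sketch via call_command; the command name is hypothetical, and 'filejson' is assumed to be a positional argument:

from django.core.management import call_command

# Create every preset defined in the JSON file and print their ids.
call_command('create_presets', 'presets.json')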

Project: beekeeper    Author: pybee
def terminate(self, aws_session=None, ec2_client=None):
        if ec2_client is None:
            if aws_session is None:
                aws_session = boto3.session.Session(
                    region_name=settings.AWS_REGION,
                    aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                    aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                )

            ec2_client = aws_session.client('ec2')

        # Save the new state of the instance.
        self.active = False
        self.save()

        # Actually terminate the instance
        try:
            ec2_client.terminate_instances(InstanceIds=[self.ec2_id])

            # Record the termination time.
            self.terminated = timezone.now()
            self.save()
        except ClientError as e:
            raise RuntimeError('Problem terminating %s: [%s] %s' % (
                self, e.response['Error']['Code'], e.response['Error']['Message'],
            ))

Project: django-eb-sqs    Author: cuda-networks
def handle(self, *args, **options):
        if not options['url']:
            raise CommandError('Worker endpoint url parameter (--url) not found')

        if not options['queue_name']:
            raise CommandError('Queue name (--queue) not specified')

        url = options['url']
        queue_name = options['queue_name']
        retry_limit = max(int(options['retry_limit']), 1)

        try:
            self.stdout.write('Connect to SQS')
            sqs = boto3.resource(
                'sqs',
                region_name=settings.AWS_REGION,
                config=Config(retries={'max_attempts': settings.AWS_MAX_RETRIES})
            )
            queue = sqs.get_queue_by_name(QueueName=queue_name)
            self.stdout.write('> Connected')

            while True:
                messages = queue.receive_messages(
                    MaxNumberOfMessages=1,
                    WaitTimeSeconds=20
                )

                if len(messages) == 0:
                    break

                for msg in messages:
                    self.stdout.write('Deliver message {}'.format(msg.message_id))
                    if self._process_message_with_retry(url, retry_limit, msg):
                        self.stdout.write('> Delivered')
                    else:
                        self.stdout.write('> Delivery failed (retry-limit reached)')
                    msg.delete()

            self.stdout.write('Message processing finished')
        except ConnectionError:
            self.stdout.write('Connection to {} failed. Message processing failed'.format(url))
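
_process_message_with_retry is referenced but not shown here; a hedged sketch of what the name and call site imply -- POST the message body to the worker endpoint, retrying up to the limit (the use of requests is an assumption):

import requests

def _process_message_with_retry(self, url, retry_limit, msg):
    # Hypothetical reconstruction: deliver the SQS message body to the
    # worker endpoint, treating any 2xx response as success.
    for _ in range(retry_limit):
        try:
            response = requests.post(url, data=msg.body)
            if response.ok:
                return True
        except requests.RequestException:
            pass
    return False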

Project: odl-video-service    Author: mitodl
def transcode_video(video, video_file):
    """
    Start a transcode job for a video

    Args:
        video(ui.models.Video): the video to transcode
        video_file(ui.models.Videofile): the s3 file to use for transcoding
    """

    video_input = {
        'Key': video_file.s3_object_key,
    }

    # Generate an output video file for each encoding (assumed to be HLS)
    outputs = [{
        'Key': video.transcode_key(preset),
        'PresetId': preset,
        'SegmentDuration': '10.0'
    } for preset in settings.ET_PRESET_IDS]

    playlists = [{
        'Format': 'HLSv3',
        'Name': video.transcode_key('_index'),
        'OutputKeys': [output['Key'] for output in outputs]
    }]

    # Generate thumbnails for the 1st encoding (no point in doing so for each).
    outputs[0]['ThumbnailPattern'] = THUMBNAIL_PATTERN.format(video_file.s3_basename)
    transcoder = VideoTranscoder(
        settings.ET_PIPELINE_ID,
        settings.AWS_REGION,
        settings.AWS_ACCESS_KEY_ID,
        settings.AWS_SECRET_ACCESS_KEY
    )
    try:
        transcoder.encode(video_input, outputs, Playlists=playlists)
    except ClientError as exc:
        log.error('Transcode job creation failed for video %s', video.id)
        video.update_status(VideoStatus.TRANSCODE_FAILED_INTERNAL)
        if hasattr(exc, 'response'):
            transcoder.message = exc.response
        raise
    finally:
        transcoder.create_job_for_object(video)
        if video.status not in (VideoStatus.TRANSCODE_FAILED_INTERNAL, VideoStatus.TRANSCODE_FAILED_VIDEO, ):
            video.update_status(VideoStatus.TRANSCODING)
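
VideoTranscoder is a project-level wrapper rather than a boto3 class; a minimal sketch of the shape the call above implies, with all internals assumed:

import boto3

class VideoTranscoder(object):
    def __init__(self, pipeline_id, region, access_key_id, secret_access_key):
        self.pipeline_id = pipeline_id
        self.client = boto3.client(
            'elastictranscoder',
            region_name=region,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key,
        )

    def encode(self, video_input, outputs, **kwargs):
        # create_job raises botocore's ClientError on failure, matching
        # the except clause in transcode_video above.
        self.message = self.client.create_job(
            PipelineId=self.pipeline_id,
            Input=video_input,
            Outputs=outputs,
            **kwargs
        )

    def create_job_for_object(self, obj):
        # Project-specific bookkeeping (persisting the job against the
        # video record); body assumed.
        pass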

Project: beekeeper    Author: pybee
def task_status(request, owner, repo_name, change_pk, build_pk, task_slug):
    try:
        task = Task.objects.get(
                        build__change__project__repository__owner__login=owner,
                        build__change__project__repository__name=repo_name,
                        build__change__pk=change_pk,
                        build__pk=build_pk,
                        slug=task_slug
                    )
    except Task.DoesNotExist:
        raise Http404

    try:
        kwargs = {
            'nextToken': request.GET['nextToken']
        }
    except KeyError:
        kwargs = {}

    aws_session = boto3.session.Session(
        region_name=settings.AWS_REGION,
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
    )
    logs = aws_session.client('logs')

    try:
        log_response = logs.get_log_events(
            logGroupName='beekeeper',
            logStreamName=task.log_stream_name,
            **kwargs
        )
        log_data = '\n'.join(
                event['message']
                for event in log_response['events']
            )
        message = None
        next_token = log_response['nextForwardToken']
        no_more_logs = log_response['nextForwardToken'] == kwargs.get('nextToken', None)
    except Exception as e:
        if task.has_error:
            log_data = None
            message = 'No logs; task did not start.'
            next_token = ''
            no_more_logs = True
        else:
            log_data = None
            message = 'Waiting for logs to become available...'
            next_token = ''
            no_more_logs = False

    return HttpResponse(json.dumps({
            'started': task.has_started,
            'log': log_data,
            'message': message,
            'status': task.full_status_display(),
            'result': task.result,
            'nextToken': next_token,
            'finished': task.is_finished and no_more_logs,
        }), content_type="application/json")
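
The no_more_logs check leans on a documented CloudWatch Logs behavior: get_log_events returns the same nextForwardToken once the end of the stream is reached. A minimal polling sketch built on that contract (stream name hypothetical):

token = None
while True:
    kwargs = {'nextToken': token} if token else {}
    page = logs.get_log_events(
        logGroupName='beekeeper',
        logStreamName='example-stream',  # hypothetical
        **kwargs
    )
    if page['nextForwardToken'] == token:
        break  # no new events since the last page
    token = page['nextForwardToken']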