Python botocore.exceptions module: PaginationError() source code examples

The following 13 code examples, extracted from open-source Python projects, show how botocore.exceptions.PaginationError() is used.

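A minimal sketch of where PaginationError typically surfaces for a caller, assuming boto3/botocore are installed and credentials are configured. CloudFormation's describe_stacks paginator is used because DescribeStacks accepts no page-size parameter, so its pagination model defines no limit_key and passing PageSize is expected to be rejected:

import boto3
from botocore.exceptions import PaginationError

client = boto3.client('cloudformation')
paginator = client.get_paginator('describe_stacks')

try:
    # PageSize is only valid for operations whose pagination model defines
    # a limit_key; the snippets below show the check that raises the error.
    for page in paginator.paginate(PaginationConfig={'PageSize': 10}):
        for stack in page.get('Stacks', []):
            print(stack['StackName'])
except PaginationError as err:
    print('Pagination failed:', err)
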
Project: aws-cfn-plex    Author: lordmuffin
def _extract_paging_params(self, kwargs):
        pagination_config = kwargs.pop('PaginationConfig', {})
        max_items = pagination_config.get('MaxItems', None)
        if max_items is not None:
            max_items = int(max_items)
        page_size = pagination_config.get('PageSize', None)
        if page_size is not None:
            if self._pagination_cfg.get('limit_key', None) is None:
                raise PaginationError(
                    message="PageSize parameter is not supported for the "
                            "pagination interface for this operation.")
            page_size = int(page_size)
        return {
            'MaxItems': max_items,
            'StartingToken': pagination_config.get('StartingToken', None),
            'PageSize': page_size,
        }
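The method above pops the caller's PaginationConfig out of the paginate() kwargs, coerces MaxItems and PageSize to integers, and rejects PageSize when the operation's pagination model defines no limit_key. A hypothetical, self-contained sketch of that behaviour; FakePaginator is not part of botocore and only supplies the _pagination_cfg attribute the method reads:

from botocore.exceptions import PaginationError

class FakePaginator:
    def __init__(self, limit_key):
        self._pagination_cfg = {'limit_key': limit_key}

# An operation with a page-size parameter: values are normalized to ints.
print(_extract_paging_params(FakePaginator('MaxResults'),
                             {'PaginationConfig': {'MaxItems': '100',
                                                   'PageSize': '25'}}))
# {'MaxItems': 100, 'StartingToken': None, 'PageSize': 25}

try:
    # No limit_key configured, so supplying PageSize raises PaginationError.
    _extract_paging_params(FakePaginator(None),
                           {'PaginationConfig': {'PageSize': 5}})
except PaginationError as err:
    print(err)
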
Project: AshsSDK    Author: thehappydinoa
def _extract_paging_params(self, kwargs):
        pagination_config = kwargs.pop('PaginationConfig', {})
        max_items = pagination_config.get('MaxItems', None)
        if max_items is not None:
            max_items = int(max_items)
        page_size = pagination_config.get('PageSize', None)
        if page_size is not None:
            if self._limit_key is None:
                raise PaginationError(
                    message="PageSize parameter is not supported for the "
                            "pagination interface for this operation.")
            input_members = self._model.input_shape.members
            limit_key_shape = input_members.get(self._limit_key)
            if limit_key_shape.type_name == 'string':
                if not isinstance(page_size, six.string_types):
                    page_size = str(page_size)
            else:
                page_size = int(page_size)
        return {
            'MaxItems': max_items,
            'StartingToken': pagination_config.get('StartingToken', None),
            'PageSize': page_size,
        }
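This later revision of the same helper additionally inspects the operation's input shape: when the limit key is modeled as a string (some services declare their page-size parameter with a string type), PageSize is coerced to str rather than int so the value still matches the model when the request is serialized.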
Project: aws-ec2rescue-linux    Author: awslabs
def _extract_paging_params(self, kwargs):
        pagination_config = kwargs.pop('PaginationConfig', {})
        max_items = pagination_config.get('MaxItems', None)
        if max_items is not None:
            max_items = int(max_items)
        page_size = pagination_config.get('PageSize', None)
        if page_size is not None:
            if self._pagination_cfg.get('limit_key', None) is None:
                raise PaginationError(
                    message="PageSize parameter is not supported for the "
                            "pagination interface for this operation.")
            page_size = int(page_size)
        return {
            'MaxItems': max_items,
            'StartingToken': pagination_config.get('StartingToken', None),
            'PageSize': page_size,
        }
Project: jepsen-training-vpc    Author: bloomberg
def _extract_paging_params(self, kwargs):
        pagination_config = kwargs.pop('PaginationConfig', {})
        max_items = pagination_config.get('MaxItems', None)
        if max_items is not None:
            max_items = int(max_items)
        page_size = pagination_config.get('PageSize', None)
        if page_size is not None:
            if self._pagination_cfg.get('limit_key', None) is None:
                raise PaginationError(
                    message="PageSize parameter is not supported for the "
                            "pagination interface for this operation.")
            page_size = int(page_size)
        return {
            'MaxItems': max_items,
            'StartingToken': pagination_config.get('StartingToken', None),
            'PageSize': page_size,
        }
Project: AWS-AutoTag    Author: cpollard0
def _extract_paging_params(self, kwargs):
        pagination_config = kwargs.pop('PaginationConfig', {})
        max_items = pagination_config.get('MaxItems', None)
        if max_items is not None:
            max_items = int(max_items)
        page_size = pagination_config.get('PageSize', None)
        if page_size is not None:
            if self._pagination_cfg.get('limit_key', None) is None:
                raise PaginationError(
                    message="PageSize parameter is not supported for the "
                            "pagination interface for this operation.")
            page_size = int(page_size)
        return {
            'MaxItems': max_items,
            'StartingToken': pagination_config.get('StartingToken', None),
            'PageSize': page_size,
        }
Project: tf_aws_ecs_instance_draining_on_scale_in    Author: terraform-community-modules
def _extract_paging_params(self, kwargs):
        pagination_config = kwargs.pop('PaginationConfig', {})
        max_items = pagination_config.get('MaxItems', None)
        if max_items is not None:
            max_items = int(max_items)
        page_size = pagination_config.get('PageSize', None)
        if page_size is not None:
            if self._pagination_cfg.get('limit_key', None) is None:
                raise PaginationError(
                    message="PageSize parameter is not supported for the "
                            "pagination interface for this operation.")
            page_size = int(page_size)
        return {
            'MaxItems': max_items,
            'StartingToken': pagination_config.get('StartingToken', None),
            'PageSize': page_size,
        }
Project: AshsSDK    Author: thehappydinoa
def ensure_paging_params_not_set(parsed_args, shadowed_args):
    paging_params = ['starting_token', 'page_size', 'max_items']
    shadowed_params = [p.replace('-', '_') for p in shadowed_args.keys()]
    params_used = [p for p in paging_params if
                   p not in shadowed_params and getattr(parsed_args, p, None)]

    if len(params_used) > 0:
        converted_params = ', '.join(
            ["--" + p.replace('_', '-') for p in params_used])
        raise PaginationError(
            message="Cannot specify --no-paginate along with pagination "
                    "arguments: %s" % converted_params)
Project: aws-cfn-plex    Author: lordmuffin
def __iter__(self):
        current_kwargs = self._op_kwargs
        previous_next_token = None
        next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        total_items = 0
        first_request = True
        primary_result_key = self.result_keys[0]
        starting_truncation = 0
        self._inject_starting_params(current_kwargs)
        while True:
            response = self._make_request(current_kwargs)
            parsed = self._extract_parsed_response(response)
            if first_request:
                # The first request is handled differently.  We could
                # possibly have a resume/starting token that tells us where
                # to index into the retrieved page.
                if self._starting_token is not None:
                    starting_truncation = self._handle_first_request(
                        parsed, primary_result_key, starting_truncation)
                first_request = False
                self._record_non_aggregate_key_values(parsed)
            current_response = primary_result_key.search(parsed)
            if current_response is None:
                current_response = []
            num_current_response = len(current_response)
            truncate_amount = 0
            if self._max_items is not None:
                truncate_amount = (total_items + num_current_response) \
                                  - self._max_items
            if truncate_amount > 0:
                self._truncate_response(parsed, primary_result_key,
                                        truncate_amount, starting_truncation,
                                        next_token)
                yield response
                break
            else:
                yield response
                total_items += num_current_response
                next_token = self._get_next_token(parsed)
                if all(t is None for t in next_token.values()):
                    break
                if self._max_items is not None and \
                        total_items == self._max_items:
                    # We're on a page boundary so we can set the current
                    # next token to be the resume token.
                    self.resume_token = next_token
                    break
                if previous_next_token is not None and \
                        previous_next_token == next_token:
                    message = ("The same next token was received "
                               "twice: %s" % next_token)
                    raise PaginationError(message=message)
                self._inject_token_into_kwargs(current_kwargs, next_token)
                previous_next_token = next_token
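The generator above backs botocore's PageIterator: it yields each raw page, truncates the final page once MaxItems is reached, records a resume_token when iteration stops exactly on a page boundary, and raises PaginationError if the service returns the same next token twice in a row, which would otherwise loop forever. A hedged sketch of the resume pattern built on top of it, assuming an IAM client and that build_full_result() exposes the recorded resume token as 'NextToken':

import boto3
from botocore.exceptions import PaginationError

iam = boto3.client('iam')
paginator = iam.get_paginator('list_users')

try:
    # Stop after 10 users; the iterator records resume_token on the page
    # boundary and build_full_result() surfaces it as 'NextToken'.
    first = paginator.paginate(
        PaginationConfig={'MaxItems': 10}).build_full_result()
    token = first.get('NextToken')
    if token:
        # Resume exactly where the previous iteration left off.
        rest = paginator.paginate(
            PaginationConfig={'StartingToken': token}).build_full_result()
        print(len(first['Users']) + len(rest['Users']))
except PaginationError as err:
    print('Pagination aborted:', err)
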
Project: AshsSDK    Author: thehappydinoa
def __iter__(self):
        current_kwargs = self._op_kwargs
        previous_next_token = None
        next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        total_items = 0
        first_request = True
        primary_result_key = self.result_keys[0]
        starting_truncation = 0
        self._inject_starting_params(current_kwargs)
        while True:
            response = self._make_request(current_kwargs)
            parsed = self._extract_parsed_response(response)
            if first_request:
                # The first request is handled differently.  We could
                # possibly have a resume/starting token that tells us where
                # to index into the retrieved page.
                if self._starting_token is not None:
                    starting_truncation = self._handle_first_request(
                        parsed, primary_result_key, starting_truncation)
                first_request = False
                self._record_non_aggregate_key_values(parsed)
            else:
                # If this isn't the first request, we have already sliced into
                # the first request and had to make additional requests after.
                # We no longer need to add this to truncation.
                starting_truncation = 0
            current_response = primary_result_key.search(parsed)
            if current_response is None:
                current_response = []
            num_current_response = len(current_response)
            truncate_amount = 0
            if self._max_items is not None:
                truncate_amount = (total_items + num_current_response) \
                                  - self._max_items
            if truncate_amount > 0:
                self._truncate_response(parsed, primary_result_key,
                                        truncate_amount, starting_truncation,
                                        next_token)
                yield response
                break
            else:
                yield response
                total_items += num_current_response
                next_token = self._get_next_token(parsed)
                if all(t is None for t in next_token.values()):
                    break
                if self._max_items is not None and \
                        total_items == self._max_items:
                    # We're on a page boundary so we can set the current
                    # next token to be the resume token.
                    self.resume_token = next_token
                    break
                if previous_next_token is not None and \
                        previous_next_token == next_token:
                    message = ("The same next token was received "
                               "twice: %s" % next_token)
                    raise PaginationError(message=message)
                self._inject_token_into_kwargs(current_kwargs, next_token)
                previous_next_token = next_token
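Unlike the otherwise identical __iter__ implementations elsewhere on this page, this copy resets starting_truncation to 0 once the first request has been handled, so the offset applied for a resume/StartingToken is not counted again when later pages are truncated.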
Project: aws-ec2rescue-linux    Author: awslabs
def __iter__(self):
        current_kwargs = self._op_kwargs
        previous_next_token = None
        next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        total_items = 0
        first_request = True
        primary_result_key = self.result_keys[0]
        starting_truncation = 0
        self._inject_starting_params(current_kwargs)
        while True:
            response = self._make_request(current_kwargs)
            parsed = self._extract_parsed_response(response)
            if first_request:
                # The first request is handled differently.  We could
                # possibly have a resume/starting token that tells us where
                # to index into the retrieved page.
                if self._starting_token is not None:
                    starting_truncation = self._handle_first_request(
                        parsed, primary_result_key, starting_truncation)
                first_request = False
                self._record_non_aggregate_key_values(parsed)
            current_response = primary_result_key.search(parsed)
            if current_response is None:
                current_response = []
            num_current_response = len(current_response)
            truncate_amount = 0
            if self._max_items is not None:
                truncate_amount = (total_items + num_current_response) \
                                  - self._max_items
            if truncate_amount > 0:
                self._truncate_response(parsed, primary_result_key,
                                        truncate_amount, starting_truncation,
                                        next_token)
                yield response
                break
            else:
                yield response
                total_items += num_current_response
                next_token = self._get_next_token(parsed)
                if all(t is None for t in next_token.values()):
                    break
                if self._max_items is not None and \
                        total_items == self._max_items:
                    # We're on a page boundary so we can set the current
                    # next token to be the resume token.
                    self.resume_token = next_token
                    break
                if previous_next_token is not None and \
                        previous_next_token == next_token:
                    message = ("The same next token was received "
                               "twice: %s" % next_token)
                    raise PaginationError(message=message)
                self._inject_token_into_kwargs(current_kwargs, next_token)
                previous_next_token = next_token
Project: jepsen-training-vpc    Author: bloomberg
def __iter__(self):
        current_kwargs = self._op_kwargs
        previous_next_token = None
        next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        total_items = 0
        first_request = True
        primary_result_key = self.result_keys[0]
        starting_truncation = 0
        self._inject_starting_params(current_kwargs)
        while True:
            response = self._make_request(current_kwargs)
            parsed = self._extract_parsed_response(response)
            if first_request:
                # The first request is handled differently.  We could
                # possibly have a resume/starting token that tells us where
                # to index into the retrieved page.
                if self._starting_token is not None:
                    starting_truncation = self._handle_first_request(
                        parsed, primary_result_key, starting_truncation)
                first_request = False
                self._record_non_aggregate_key_values(parsed)
            current_response = primary_result_key.search(parsed)
            if current_response is None:
                current_response = []
            num_current_response = len(current_response)
            truncate_amount = 0
            if self._max_items is not None:
                truncate_amount = (total_items + num_current_response) \
                                  - self._max_items
            if truncate_amount > 0:
                self._truncate_response(parsed, primary_result_key,
                                        truncate_amount, starting_truncation,
                                        next_token)
                yield response
                break
            else:
                yield response
                total_items += num_current_response
                next_token = self._get_next_token(parsed)
                if all(t is None for t in next_token.values()):
                    break
                if self._max_items is not None and \
                        total_items == self._max_items:
                    # We're on a page boundary so we can set the current
                    # next token to be the resume token.
                    self.resume_token = next_token
                    break
                if previous_next_token is not None and \
                        previous_next_token == next_token:
                    message = ("The same next token was received "
                               "twice: %s" % next_token)
                    raise PaginationError(message=message)
                self._inject_token_into_kwargs(current_kwargs, next_token)
                previous_next_token = next_token
Project: AWS-AutoTag    Author: cpollard0
def __iter__(self):
        current_kwargs = self._op_kwargs
        previous_next_token = None
        next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        total_items = 0
        first_request = True
        primary_result_key = self.result_keys[0]
        starting_truncation = 0
        self._inject_starting_params(current_kwargs)
        while True:
            response = self._make_request(current_kwargs)
            parsed = self._extract_parsed_response(response)
            if first_request:
                # The first request is handled differently.  We could
                # possibly have a resume/starting token that tells us where
                # to index into the retrieved page.
                if self._starting_token is not None:
                    starting_truncation = self._handle_first_request(
                        parsed, primary_result_key, starting_truncation)
                first_request = False
                self._record_non_aggregate_key_values(parsed)
            current_response = primary_result_key.search(parsed)
            if current_response is None:
                current_response = []
            num_current_response = len(current_response)
            truncate_amount = 0
            if self._max_items is not None:
                truncate_amount = (total_items + num_current_response) \
                                  - self._max_items
            if truncate_amount > 0:
                self._truncate_response(parsed, primary_result_key,
                                        truncate_amount, starting_truncation,
                                        next_token)
                yield response
                break
            else:
                yield response
                total_items += num_current_response
                next_token = self._get_next_token(parsed)
                if all(t is None for t in next_token.values()):
                    break
                if self._max_items is not None and \
                        total_items == self._max_items:
                    # We're on a page boundary so we can set the current
                    # next token to be the resume token.
                    self.resume_token = next_token
                    break
                if previous_next_token is not None and \
                        previous_next_token == next_token:
                    message = ("The same next token was received "
                               "twice: %s" % next_token)
                    raise PaginationError(message=message)
                self._inject_token_into_kwargs(current_kwargs, next_token)
                previous_next_token = next_token
Project: tf_aws_ecs_instance_draining_on_scale_in    Author: terraform-community-modules
def __iter__(self):
        current_kwargs = self._op_kwargs
        previous_next_token = None
        next_token = dict((key, None) for key in self._input_token)
        # The number of items from result_key we've seen so far.
        total_items = 0
        first_request = True
        primary_result_key = self.result_keys[0]
        starting_truncation = 0
        self._inject_starting_params(current_kwargs)
        while True:
            response = self._make_request(current_kwargs)
            parsed = self._extract_parsed_response(response)
            if first_request:
                # The first request is handled differently.  We could
                # possibly have a resume/starting token that tells us where
                # to index into the retrieved page.
                if self._starting_token is not None:
                    starting_truncation = self._handle_first_request(
                        parsed, primary_result_key, starting_truncation)
                first_request = False
                self._record_non_aggregate_key_values(parsed)
            current_response = primary_result_key.search(parsed)
            if current_response is None:
                current_response = []
            num_current_response = len(current_response)
            truncate_amount = 0
            if self._max_items is not None:
                truncate_amount = (total_items + num_current_response) \
                                  - self._max_items
            if truncate_amount > 0:
                self._truncate_response(parsed, primary_result_key,
                                        truncate_amount, starting_truncation,
                                        next_token)
                yield response
                break
            else:
                yield response
                total_items += num_current_response
                next_token = self._get_next_token(parsed)
                if all(t is None for t in next_token.values()):
                    break
                if self._max_items is not None and \
                        total_items == self._max_items:
                    # We're on a page boundary so we can set the current
                    # next token to be the resume token.
                    self.resume_token = next_token
                    break
                if previous_next_token is not None and \
                        previous_next_token == next_token:
                    message = ("The same next token was received "
                               "twice: %s" % next_token)
                    raise PaginationError(message=message)
                self._inject_token_into_kwargs(current_kwargs, next_token)
                previous_next_token = next_token