Python elasticsearch.exceptions module, NotFoundError() code examples

The following 44 code examples, extracted from open-source Python projects, show how to use elasticsearch.exceptions.NotFoundError().
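
Before the project examples, here is a minimal sketch of the pattern they all share: call the client inside try/except and treat NotFoundError as "the document or index is missing". The host, index name, doc type, and document id below are placeholders, not taken from any of the projects.

from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError

es = Elasticsearch([{'host': 'localhost', 'port': 9200}])  # assumed local cluster

try:
    # hypothetical index / doc_type / id, used only for illustration
    doc = es.get(index='my-index', doc_type='my-type', id='42')
except NotFoundError:
    doc = None  # the document (or the whole index) does not exist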

Project: micromasters    Author: mitodl
def test_index_percolate_query(self):
        """Test that we index the percolate query"""
        query = {"query": {"match": {"profile.first_name": "here"}}}
        percolate_query = PercolateQueryFactory.create(query=query, original_query="original")
        percolate_query_id = 123
        percolate_query.id = percolate_query_id
        # Don't save since that will trigger a signal which will update the index
        with self.assertRaises(NotFoundError):
            es.get_percolate_query(percolate_query_id)
        index_percolate_queries([percolate_query])
        assert es.get_percolate_query(percolate_query_id) == {
            '_id': str(percolate_query_id),
            '_index': es.get_default_backing_index(),
            '_source': query,
            '_type': PERCOLATE_DOC_TYPE,
            '_version': 1,
            'found': True,
        }
Project: micromasters    Author: mitodl
def test_delete_percolate_queries(self):
        """Test that we delete the percolate query from the index"""
        query = {"query": {"match": {"profile.first_name": "here"}}}
        with patch('search.signals.transaction', on_commit=lambda callback: callback()):
            percolate_query = PercolateQueryFactory.create(query=query, original_query="original")
            assert es.get_percolate_query(percolate_query.id) == {
                '_id': str(percolate_query.id),
                '_index': es.get_default_backing_index(),
                '_source': query,
                '_type': PERCOLATE_DOC_TYPE,
                '_version': 1,
                'found': True,
            }
            delete_percolate_query(percolate_query.id)
            with self.assertRaises(NotFoundError):
                es.get_percolate_query(percolate_query.id)
            # If we delete it again there should be no exception
            delete_percolate_query(percolate_query.id)
            with self.assertRaises(NotFoundError):
                es.get_percolate_query(percolate_query.id)
Project: handelsregister    Author: Amsterdam
def execute(self):

        idx = es.Index(self.index)

        try:
            idx.delete(ignore=404)
            log.info("Deleted index %s", self.index)
        except AttributeError:
            log.warning("Could not delete index '%s', ignoring", self.index)
        except NotFoundError:
            log.warning("Could not delete index '%s', ignoring", self.index)

        # create doc types
        for dt in self.doc_types:
            idx.doc_type(dt)

        # create index
        idx.create()
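
The snippet above also passes ignore=404 to the delete call: the client accepts an ignore parameter listing HTTP status codes that should be swallowed instead of raised as NotFoundError. A rough sketch of that alternative using the low-level client rather than elasticsearch-dsl's Index wrapper; the index name is a placeholder:

from elasticsearch import Elasticsearch

es = Elasticsearch([{'host': 'localhost', 'port': 9200}])  # assumed local cluster

# With ignore=[404] a missing index returns the parsed error body instead of
# raising NotFoundError, so no try/except is needed for that case.
response = es.indices.delete(index='some-index', ignore=[404])
if response.get('acknowledged'):
    print('index deleted')
else:
    print('index did not exist')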
Project: find-that-charity    Author: TechforgoodCAST
def fetch_postcode(postcode, es, es_index="postcode", es_type="postcode"):
    if postcode is None:
        return None

    areas = ["hro", "wz11", "bua11", "pct", "lsoa11", "nuts", "msoa11", "laua",
             "oa11", "ccg", "ward", "teclec", "gor", "ttwa", "pfa", "pcon",
             "lep1", "cty", "eer", "ctry", "park", "lep2", "hlthau", "buasd11"]
    try:
        res = es.get(index=es_index, doc_type=es_type,
                     id=postcode, ignore=[404])
        if res['found']:
            return (res['_source'].get("location"),
                    {k: res['_source'].get(k) for
                     k in res['_source'] if k in areas})
    except (NotFoundError, ValueError):
        return None
Project: elasticmock    Author: vrcmarcos
def get(self, index, id, doc_type='_all', params=None):
        result = None
        if index in self.__documents_dict:
            for document in self.__documents_dict[index]:
                if document.get('_id') == id:
                    if doc_type == '_all':
                        result = document
                        break
                    else:
                        if document.get('_type') == doc_type:
                            result = document
                            break

        if result:
            result['found'] = True
        else:
            error_data = {
                '_index': index,
                '_type': doc_type,
                '_id': id,
                'found': False
            }
            raise NotFoundError(404, json.dumps(error_data))

        return result
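
The mock raises NotFoundError with the same (status code, body) arguments the real client passes, so test code can inspect them through the status_code and error properties inherited from TransportError. A small sketch, assuming elasticmock's @elasticmock decorator, which patches Elasticsearch() to return the fake client; the index, doc type, and id are hypothetical:

import elasticsearch
from elasticmock import elasticmock
from elasticsearch.exceptions import NotFoundError

@elasticmock
def demo():
    es = elasticsearch.Elasticsearch(hosts=[{'host': 'localhost', 'port': 9200}])
    try:
        es.get(index='missing-index', doc_type='doc', id='1')  # nothing indexed yet
    except NotFoundError as exc:
        print(exc.status_code)  # 404
        print(exc.error)        # JSON body with '_index', '_type', '_id', 'found': False

demo()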
Project: elasticmock    Author: vrcmarcos
def suggest(self, body, index=None, params=None):
        if index is not None and index not in self.__documents_dict:
            raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))

        result_dict = {}
        for key, value in body.items():
            text = value.get('text')
            suggestion = int(text) + 1 if isinstance(text, int) else '{0}_suggestion'.format(text)
            result_dict[key] = [
                {
                    'text': text,
                    'length': 1,
                    'options': [
                        {
                            'text': suggestion,
                            'freq': 1,
                            'score': 1.0
                        }
                    ],
                    'offset': 0
                }
            ]
        return result_dict
Project: elasticmock    Author: vrcmarcos
def _normalize_index_to_list(self, index):
        # Ensure we have a list of indexes
        if index is None:
            searchable_indexes = self.__documents_dict.keys()
        elif isinstance(index, str) or isinstance(index, unicode):
            searchable_indexes = [index]
        elif isinstance(index, list):
            searchable_indexes = index
        else:
            # Is this the correct exception to use?
            raise ValueError("Invalid param 'index'")

        # Check index(es) exists
        for searchable_index in searchable_indexes:
            if searchable_index not in self.__documents_dict:
                raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(searchable_index))

        return searchable_indexes
Project: kibtool    Author: jpparis-orange
def test_delete_dashid(self):
    (l_srcName, l_dstName) = self.create_indices()

    with patch('sys.stdout', new=StringIO()) as fake_out, patch('sys.stderr', new=StringIO()) as fake_err:
      l_kibtool = kibtool.KibTool(["./test_kibtool", "--kibfrom", l_srcName,
                                   "--dashid", "dashboard-1",
                                   "--delete"])
      l_kibtool.execute()
      self.assertEquals(fake_out.getvalue().strip(), "")
      self.assertEquals(fake_err.getvalue().strip(), "")

    try:
      self.client.get(index=l_srcName, doc_type="dashboard", id="dashboard-1")
      self.assertTrue(False, "dashboard-1 still present")
    except exceptions.NotFoundError as e:
      pass
    l_dst = self.client.search(index=l_dstName, doc_type="*", body={"query": {"match_all": {}}})
    self.assertEquals(l_dst["hits"]["total"], 0)
Project: kibtool    Author: jpparis-orange
def test_delete_dash(self):
    (l_srcName, l_dstName) = self.create_indices()

    with patch('sys.stdout', new=StringIO()) as fake_out, patch('sys.stderr', new=StringIO()) as fake_err:
      l_kibtool = kibtool.KibTool(["./test_kibtool", "--kibfrom", l_srcName,
                                   "--dash", "dashboard 1",
                                   "--delete"])
      l_kibtool.execute()
      self.assertEquals(fake_out.getvalue().strip(), "")
      self.assertEquals(fake_err.getvalue().strip(), "")

    try:
      self.client.get(index=l_srcName, doc_type="dashboard", id="dashboard-1")
      self.assertTrue(False, "dashboard-1 still present")
    except exceptions.NotFoundError as e:
      pass
    l_src = self.client.get(index=l_srcName, doc_type="dashboard", id="dashboard-8")
    l_srcIdx = l_src.pop("_index")
    self.assertEquals(l_srcIdx, l_srcName)
    l_dst = self.client.search(index=l_dstName, doc_type="*", body={"query": {"match_all": {}}})
    self.assertEquals(l_dst["hits"]["total"], 0)
Project: web    Author: pyjobs
def _synchronisation_op(self, elasticsearch_doctype, pending_insertions):
        self._logging(logging.INFO,
                      'Computing required operations to synchronize documents.')

        for p in pending_insertions:
            doc_dict = p.to_dict(True)

            try:
                elasticsearch_doctype.get(p.id)
                update_op = doc_dict
                update_op['_op_type'] = 'update'
                update_op['doc'] = doc_dict['_source']
                del update_op['_source']
                sync_op = update_op
            except NotFoundError:
                add_op = doc_dict
                add_op['_op_type'] = 'index'
                sync_op = add_op

            yield sync_op
Project: data-store    Author: HumanCellAtlas
def get(uuid: str, replica: str):
    owner = request.token_info['email']

    es_client = ElasticsearchClient.get(logger)

    try:
        response = es_client.get(index=Config.get_es_index_name(ESIndexType.subscriptions, Replica[replica]),
                                 doc_type=ESDocType.subscription.name,
                                 id=uuid)
    except NotFoundError as ex:
        raise DSSException(requests.codes.not_found, "not_found", "Cannot find subscription!")

    source = response['_source']
    source['uuid'] = uuid
    source['replica'] = replica

    if source['owner'] != owner:
        # common_error_handler defaults code to capitalized 'Forbidden' for Werkzeug exception. Keeping consistent.
        raise DSSException(requests.codes.forbidden, "Forbidden", "Your credentials can't access this subscription!")

    return jsonify(source), requests.codes.okay
Project: spamscope    Author: SpamScope
def update_template(es, max_retry, template_path, template_name):
    with open(template_path) as f:
        body = f.read()

    for i in range(max_retry, 0, -1):
        try:
            es.indices.put_template(name=template_name, body=body)
            log.info("Updating template {!r} done".format(template_name))
            return

        except (ConnectionError, NotFoundError):
            log.warning(
                "Updating template {!r} failed. Waiting for {} sec".format(
                    template_name, i))
            time.sleep(i)

    log.error("Updating template {!r} definitely failed".format(template_name))
Project: django_simple_forums    Author: cdriehuys
def test_update_old_threads(self):
        """ Test updating the index with old threads.

        If there was a thread that was previously in the index and has
        since been deleted, then it should be removed from the index.
        """
        thread = create_thread()
        thread_pk = thread.pk

        backend = ElasticSearch()
        backend.add(thread)

        thread.delete()

        call_command('updateindex', stdout=self.out)

        es = Elasticsearch([{'host': 'localhost', 'port': 9200}])

        with self.assertRaises(NotFoundError):
            es.get_source(
                index='test',
                doc_type='thread',
                id=thread_pk)
Project: django_simple_forums    Author: cdriehuys
def test_remove(self):
        """ Test removing an object from the search index.

        Removing an object from the search index should make it
        inaccessible to elasticsearch.
        """
        thread = create_thread()
        self.backend.add(thread)
        self.backend.remove(thread)

        es = Elasticsearch([{'host': 'localhost', 'port': 9200}])

        with self.assertRaises(NotFoundError):
            es.get_source(
                index=self.backend.index,
                doc_type='thread',
                id=thread.pk)
Project: django_simple_forums    Author: cdriehuys
def test_remove_message(self):
        """ Test removing a thread with messages.

        If a thread has messages associated with it, those messages
        should be removed from the search backend when the thread
        instance is removed.
        """
        thread = create_thread()
        message = create_message(thread=thread)

        self.backend.add(thread)
        self.backend.remove(thread)

        es = Elasticsearch([{'host': 'localhost', 'port': 9200}])

        with self.assertRaises(NotFoundError):
            es.get_source(
                index=self.backend.index,
                doc_type='message',
                id=message.pk)
Project: django_simple_forums    Author: cdriehuys
def test_wipe(self):
        """ Test wiping the search index.

        Objects in the search index prior to the wipe should no longer
        be searchable.
        """
        thread = create_thread()
        self.backend.add(thread)

        self.backend.wipe()

        with self.assertRaises(NotFoundError):
            self.backend.es.get_source(
                index=self.backend.index,
                doc_type='thread',
                id=thread.pk)
Project: mygene.info    Author: biothings
def get(self, name):
        '''get a named filter.'''
        try:
            return self.conn.get(self.ES_INDEX_NAME, name, self.ES_DOC_TYPE)['_source']
        except NotFoundError:
            return None
Project: micromasters    Author: mitodl
def document_needs_updating(enrollment):
    """
    Get the document from elasticsearch and see if it matches what's in the database

    Args:
        enrollment (ProgramEnrollment): A program enrollment

    Returns:
        bool: True if the document needs to be updated via reindex
    """
    conn = get_conn()
    try:
        document = conn.get(index=get_default_alias(), doc_type=USER_DOC_TYPE, id=enrollment.id)
    except NotFoundError:
        return True
    serialized_enrollment = serialize_program_enrolled_user(enrollment)
    del serialized_enrollment['_id']
    source = document['_source']

    if serialized_enrollment != source:
        # Convert OrderedDict to dict
        reserialized_enrollment = json.loads(json.dumps(serialized_enrollment))

        diff = make_patch(source, reserialized_enrollment).patch
        serialized_diff = json.dumps(diff, indent="    ")
        log.info("Difference found for enrollment %s: %s", enrollment, serialized_diff)
        return True
    return False
Project: micromasters    Author: mitodl
def _delete_item(document_id, doc_type, index):
    """
    Helper function to delete a document

    Args:
        document_id (int): A document id
        doc_type (str): A document type
        index (str): An Elasticsearch index
    """
    conn = get_conn(verify_index=index)
    try:
        conn.delete(index=index, doc_type=doc_type, id=document_id)
    except NotFoundError:
        # Item is already gone
        pass
Project: elasticsearch2-haystack    Author: NDevox
def setup(self):
        """
        Defers loading until needed.
        """
        # Get the existing mapping & cache it. We'll compare it
        # during the ``update`` & if it doesn't match, we'll put the new
        # mapping.
        try:
            self.existing_mapping = self.conn.indices.get_mapping(index=self.index_name)
        except NotFoundError:
            pass
        except Exception:
            if not self.silently_fail:
                raise

        unified_index = haystack.connections[self.connection_alias].get_unified_index()
        self.content_field_name, field_mapping = self.build_schema(unified_index.all_searchfields())
        current_mapping = {
            'modelresult': {
                'properties': field_mapping,
            }
        }

        if current_mapping != self.existing_mapping:
            try:
                # Make sure the index is there first.
                self.conn.indices.create(index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400)
                self.conn.indices.put_mapping(index=self.index_name, doc_type='modelresult', body=current_mapping)
                self.existing_mapping = current_mapping
            except Exception:
                if not self.silently_fail:
                    raise

        self.setup_complete = True
Project: espandas    Author: dashaub
def es_read(self, keys, index, doc_type):
        """
        Read from an ElasticSearch index and return a DataFrame
        :param keys: a list of keys to extract in elasticsearch
        :param index: the ElasticSearch index to read
        :param doc_type: the ElasticSearch doc_type to read
        """
        self.successful_ = 0
        self.failed_ = 0

        # Collect records for all of the keys
        records = []
        for key in keys:
            try:
                record = self.client.get(index=index, doc_type=doc_type, id=key)
                self.successful_ += 1
                if '_source' in record:
                    records.append(record['_source'])
            except NotFoundError as nfe:
                print('Key not found: %s' % nfe)
                self.failed_ += 1

        # Prepare the records into a single DataFrame
        df = None
        if records:
            df = pd.DataFrame(records).fillna(value=np.nan)
            df = df.reindex_axis(sorted(df.columns), axis=1)
        return df
Project: invenio-stats    Author: inveniosoftware
def post(self, **kwargs):
        """Get statistics."""
        data = request.get_json(force=False)
        if data is None:
            data = {}
        result = {}
        for query_name, config in data.items():
            if config is None or not isinstance(config, dict) \
                    or (set(config.keys()) != {'stat', 'params'} and
                        set(config.keys()) != {'stat'}):
                raise InvalidRequestInputError(
                    'Invalid Input. It should be of the form '
                    '{ STATISTIC_NAME: { "stat": STAT_TYPE, '
                    '"params": STAT_PARAMS \}}'
                )
            stat = config['stat']
            params = config.get('params', {})
            try:
                query_cfg = current_stats.queries[stat]
            except KeyError:
                raise UnknownQueryError(stat)

            permission = current_stats.permission_factory(stat, params)
            if permission is not None and not permission.can():
                message = ('You do not have a permission to query the '
                           'statistic "{}" with those '
                           'parameters'.format(stat))
                if current_user.is_authenticated:
                    abort(403, message)
                abort(401, message)
            try:
                query = query_cfg.query_class(**query_cfg.query_config)
                result[query_name] = query.run(**params)
            except ValueError as e:
                raise InvalidRequestInputError(e.args[0])
            except NotFoundError as e:
                return None
        return self.make_response(result)
Project: elasticmock    Author: vrcmarcos
def test_should_raise_notfounderror_when_nonindexed_id_is_used(self):
        with self.assertRaises(NotFoundError):
            self.es.get(index=self.index_name, id='1')
Project: elasticmock    Author: vrcmarcos
def test_should_raise_notfounderror_when_search_for_unexistent_index(self):
        with self.assertRaises(NotFoundError):
            self.es.search(index=self.index_name)
Project: elasticmock    Author: vrcmarcos
def test_should_raise_exception_when_delete_nonindexed_document(self):
        with self.assertRaises(NotFoundError):
            self.es.delete(index=self.index_name, doc_type=self.doc_type, id=1)
Project: elasticmock    Author: vrcmarcos
def test_should_raise_notfounderror_when_nonindexed_id_is_used_for_suggest(self):
        with self.assertRaises(NotFoundError):
            self.es.suggest(body={}, index=self.index_name)
Project: kibtool    Author: jpparis-orange
def getObjects(self, p_luceneReq, p_type, p_ctor):
    l_request = {
      "fields": ["_id"],
      "size": self.m_args.count,
      "sort": {
        "_id": {
          "order": "asc"
        }
      },
      "query": {
        "query_string" : {
          "query" : "title:\"" + KibTool.toLuceneSyntax(p_luceneReq) + "\" AND _type:" + p_type
        }
      }
    }
    if self.m_args.debug:
      print("---", l_request)
    try:
      l_response = self.m_esfrom.search(index=self.m_args.kibfrom, doc_type=p_type, body=l_request)
    except exceptions.NotFoundError:
      print("*** Can't search in unknown index", self.m_args.kibfrom, file=sys.stderr)
      sys.exit(1)
    l_result = []
    if 0 == l_response["hits"]["total"]:
      print("*** No %s found for '%s' in index %s/%s" %
            (p_type, p_luceneReq, self.m_args.esfrom, self.m_args.kibfrom), file=sys.stderr)
      sys.exit(1)
    elif self.m_args.count < l_response["hits"]["total"]:
      print("*** Please use a greater --count (%d) to select all %ss" %
            (l_response["hits"]["total"], p_type), file=sys.stderr)
      sys.exit(1)
    else:
      for c_hit in l_response["hits"]["hits"]:
        l_d = p_ctor(self.m_esfrom, self.m_args.kibfrom, c_hit["_id"])
        l_result.append(l_d)
    return l_result
Project: kibtool    Author: jpparis-orange
def deleteFromEs(self):
    try:
      self.m_json = self.m_es.delete(index=self.m_index, doc_type=self.m_type, id=self.m_id)
    except exceptions.NotFoundError as e:
      return
Project: kibtool    Author: jpparis-orange
def readFromEs(self):
    if self.m_json:
      return
    try:
      self.m_json = self.m_es.get(index=self.m_index, doc_type=self.m_type, id=self.m_id)
    except exceptions.NotFoundError as e:
      return
Project: kge-server    Author: vfrico
def get_entity_dto(self, entity_uri):
        """Returns an EntityDAO given an entity_id
        """
        e_uuid = hashlib.md5(entity_uri.encode('utf-8')).hexdigest()

        try:
            entity = self.es.get(index=self.index, doc_type=self.type,
                                 id=e_uuid)
            return EntityDTO(entity['_source'])

        except es_exceptions.NotFoundError:
            return EntityDTO({})
Project: elasticsearch_loader    Author: moshe
def cli(ctx, **opts):
    ctx.obj = opts
    es_opts = {x: y for x, y in opts.items() if x in ('use_ssl', 'ca_certs', 'verify_certs', 'http_auth')}
    ctx.obj['es_conn'] = Elasticsearch(opts['es_host'], **es_opts)
    if opts['delete']:
        try:
            ctx.obj['es_conn'].indices.delete(opts['index'])
            log('info', 'Index %s deleted' % opts['index'])
        except NotFoundError:
            log('info', 'Skipping index deletion')
    if opts['index_settings_file']:
        if ctx.obj['es_conn'].indices.exists(index=opts['index']):
            ctx.obj['es_conn'].indices.put_settings(index=opts['index'], body=opts['index_settings_file'].read())
        else:
            ctx.obj['es_conn'].indices.create(index=opts['index'], body=opts['index_settings_file'].read())
    if ctx.invoked_subcommand is None:
        commands = cli.commands.keys()
        if ctx.default_map:
            default_command = ctx.default_map.get('default_command')
            if default_command:
                command = cli.get_command(ctx, default_command)
                if command:
                    ctx.invoke(command, **ctx.default_map[default_command]['arguments'])
                    return
                else:
                    ctx.fail('Cannot find default_command: {},\navailable commands are: {}'.format(default_command, ", ".join(commands)))
            else:
                ctx.fail('No subcommand specified via command line / task file,\navailable commands are: {}'.format(", ".join(commands)))
        else:
            ctx.fail('No subcommand specified via command line / task file,\navailable commands are: {}'.format(", ".join(commands)))
Project: nhaystack    Author: noveogroup
def setup(self):
        """
        Defers loading until needed.
        """
        # Get the existing mapping & cache it. We'll compare it
        # during the ``update`` & if it doesn't match, we'll put the new
        # mapping.
        try:
            self.existing_mapping = self.conn.indices.get_mapping(index=self.index_name)
        except NotFoundError:
            pass
        except Exception:
            if not self.silently_fail:
                raise

        unified_index = haystack.connections[self.connection_alias].get_unified_index()
        self.content_field_name, field_mapping = self.build_schema(unified_index.all_searchfields())
        # fixing ES 1.x/2.x compatible `_boost`
        current_mapping = {
            'modelresult': {
                'properties': field_mapping,
            }
        }
        if elasticsearch.VERSION < (2, 0, 0):
            current_mapping['modelresult']['_boost'] = {
                'name': 'boost',
                'null_value': 1.0
            }
        # end fixing ES 1.x/2.x compatible `_boost`

        if current_mapping != self.existing_mapping:
            try:
                # Make sure the index is there first.
                self.conn.indices.create(index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400)
                self.conn.indices.put_mapping(index=self.index_name, doc_type='modelresult', body=current_mapping)
                self.existing_mapping = current_mapping
            except Exception:
                if not self.silently_fail:
                    raise

        self.setup_complete = True
Project: django-haystack-elasticsearch    Author: CraveFood
def setup(self):
        """
        Defers loading until needed.
        """
        # Get the existing mapping & cache it. We'll compare it
        # during the ``update`` & if it doesn't match, we'll put the new
        # mapping.
        try:
            self.existing_mapping = self.conn.indices.get_mapping(index=self.index_name)
        except NotFoundError:
            pass
        except Exception:
            if not self.silently_fail:
                raise

        unified_index = haystack.connections[self.connection_alias].get_unified_index()
        self.content_field_name, field_mapping = self.build_schema(unified_index.all_searchfields())
        current_mapping = {
            'modelresult': {
                'properties': field_mapping,
            }
        }

        if current_mapping != self.existing_mapping:
            try:
                # Make sure the index is there first.
                self.conn.indices.create(index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400)
                self.conn.indices.put_mapping(index=self.index_name, doc_type='modelresult', body=current_mapping)
                self.existing_mapping = current_mapping
            except Exception:
                if not self.silently_fail:
                    raise

        self.setup_complete = True
Project: data-store    Author: HumanCellAtlas
def delete(uuid: str, replica: str):
    authenticated_user_email = request.token_info['email']

    es_client = ElasticsearchClient.get(logger)

    try:
        response = es_client.get(index=Config.get_es_index_name(ESIndexType.subscriptions, Replica[replica]),
                                 doc_type=ESDocType.subscription.name,
                                 id=uuid)
    except NotFoundError as ex:
        raise DSSException(requests.codes.not_found, "not_found", "Cannot find subscription!")

    stored_metadata = response['_source']

    if stored_metadata['owner'] != authenticated_user_email:
        # common_error_handler defaults code to capitalized 'Forbidden' for Werkzeug exception. Keeping consistent.
        raise DSSException(requests.codes.forbidden, "Forbidden", "Your credentials can't access this subscription!")

    #  get all indexes that use current alias
    alias_name = Config.get_es_alias_name(ESIndexType.docs, Replica[replica])
    doc_indexes = _get_indexes_by_alias(es_client, alias_name)
    _unregister_percolate(es_client, doc_indexes, uuid)

    es_client.delete(index=Config.get_es_index_name(ESIndexType.subscriptions, Replica[replica]),
                     doc_type=ESDocType.subscription.name,
                     id=uuid)

    timestamp = datetime.datetime.utcnow()
    time_deleted = timestamp.strftime("%Y-%m-%dT%H%M%S.%fZ")

    return jsonify({'timeDeleted': time_deleted}), requests.codes.okay
Project: data-store    Author: HumanCellAtlas
def _get_indexes_by_alias(es_client: Elasticsearch, alias_name: str):
    try:
        return list(es_client.indices.get_alias(alias_name).keys())
    except NotFoundError:
        return []
Project: spamscope    Author: SpamScope
def update_nr_replicas(es, max_retry, nr_replicas, index):
    for i in range(max_retry, 0, -1):
        try:
            es.indices.put_settings(
                body={"index": {"number_of_replicas": int(nr_replicas)}},
                index=index)
            log.info("Updating replicas done")
            return

        except (ConnectionError, NotFoundError):
            log.warning(
                "Updating replicas failed. Waiting for {} sec".format(i))
            time.sleep(i)

    log.error("Updating replicas definitely failed")
Project: mist.api    Author: mistio
def get_story(owner_id, story_id, story_type=None, expand=True):
    """Fetch a single story given its story_id."""
    assert story_id
    story = get_stories(owner_id=owner_id, stories=story_id,
                        story_type=story_type, expand=expand)
    if not story:
        msg = 'Story %s' % story_id
        if story_type:
            msg += ' [%s]' % story_type
        raise NotFoundError(msg)
    if len(story) > 1:
        log.error('Found multiple stories with story_id %s', story_id)
    return story[0]
Project: mist.api    Author: mistio
def delete_story(owner_id, story_id):
    """Delete a story."""
    index = 'app-logs-*'
    query = {
        'query': {
            'bool': {
                'filter': {
                    'bool': {
                        'must': [
                            {'term': {'owner_id': owner_id}},
                            {'term': {'story_id': story_id}},
                        ]
                    }
                }
            }
        }
    }
    # Delete all documents matching the above query.
    result = es().delete_by_query(index=index, body=query, conflicts='proceed')
    if not result['deleted']:
        raise NotFoundError('story_id %s' % story_id)
    # Report results.
    msg = 'Deleted %s log(s) with story_id %s' % (result['deleted'], story_id)
    if result['version_conflicts']:
        msg += ' Counted %s version_conflicts' % result['version_conflicts']
    if result['failures']:
        msg += ' Finished with failures: %s' % result['failures']
    log.warn(msg)
Project: django_simple_forums    Author: cdriehuys
def test_remove_invalid_pk(self):
        """ Test removing an object that is not in the index.

        Removing an object that is not in the index should raise a
        NotFoundError
        """
        thread = create_thread()
        self.backend.add(thread)
        self.backend.remove(thread)
        # try removing it after it's been removed
        with self.assertRaises(NotFoundError):
            self.backend.remove(thread)
Project: bouncer    Author: hypothesis
def test_annotation_increments_stat_if_get_raises_not_found_error(self,
                                                                      statsd):
        request = mock_request()
        request.es.get.side_effect = es_exceptions.NotFoundError

        try:
            views.AnnotationController(request).annotation()
        except:
            pass

        statsd.incr.assert_called_once_with(
            "views.annotation.404.annotation_not_found")
Project: bouncer    Author: hypothesis
def test_annotation_raises_http_not_found_if_get_raises_not_found(self):
        request = mock_request()
        request.es.get.side_effect = es_exceptions.NotFoundError

        with pytest.raises(httpexceptions.HTTPNotFound):
            views.AnnotationController(request).annotation()
Project: Tattle    Author: nickmaccarthy
def es_search(es, *args, **kwargs):
    try:
        results = es.search(request_timeout=10, **kwargs) 
    except NotFoundError:
        logger.debug('Index not found: args: {}, kwargs: {}'.format(args, kwargs))
        return

    return results
Project: metastore    Author: datahq
def query(kind, userid, size=50, **kw):
    kind_params = ENABLED_SEARCHES.get(kind)
    try:
        # Arguments received from a network request come in kw, as a mapping
        # between param_name and a list of received values.
        # If size was provided by the user, it will be a list, so we take its
        # first item.
        if type(size) is list:
            size = size[0]
            if int(size) > 100:
                size = 100

        from_ = int(kw.pop('from', [0])[0])

        api_params = dict([
            ('index', kind_params['index']),
            ('doc_type', kind_params['doc_type']),
            ('size', size),
            ('from_', from_)
        ])

        body = build_dsl(kind_params, userid, kw)
        api_params['body'] = json.dumps(body)
        ret = _get_engine().search(**api_params)
        logging.info('Performing query %r', kind_params)
        logging.info('api_params %r', api_params)
        logging.info('ret %r', ret)
        if ret.get('hits') is not None:
            results = [hit['_source'] for hit in ret['hits']['hits']]
            total = ret['hits']['total']
            total_bytes = ret.get('aggregations')['total_bytes']['value']
        else:
            results = []
            total = 0
            total_bytes = 0
        return {
            'results': results,
            'summary': {
                "total": total,
                "totalBytes": total_bytes
            }
        }
    except (NotFoundError, json.decoder.JSONDecodeError, ValueError) as e:
        logging.error("query: %r" % e)
        return {
            'results': [],
            'summary': {
                "total": 0,
                "totalBytes": 0
            },
            'error': str(e)
        }
Project: kge-server    Author: vfrico
def generate_index(self, indexName):
        """Generates the index on Elasticsearch

        This method is intended to be used internally. It creates an index
        using certains parameters to get a better search performance.

        :params str indexName: Name of the new index
        """
        body = {'mappings': {
                            self.type: {
                                'properties': {},
                                'dynamic': True
                                }
                             },
                'settings': {
                    'analysis': {
                      'analyzer': {
                        'my_custom_analyzer': {
                          'type': 'custom',
                          'tokenizer': 'standard',
                          'filter': ['lowercase', 'my_ascii_folding']
                        }
                      },
                      'filter': {
                        'my_ascii_folding': {
                            'type': 'asciifolding',
                            'preserve_original': True
                        }
                      }
                    }
                  }}
        suggest_field = {
            'type': 'completion',
            'analyzer': 'my_custom_analyzer',
            'search_analyzer': 'standard',
            'preserve_separators': False,
            'preserve_position_increments': False
        }
        body['mappings'][self.type]['properties'] = {
            'entity_id': {'type': 'string'},
            'entity_uri': {'type': 'string'},
            'description': {'type': 'object'},
            'label': {'type': 'object'},
            'alt_label': {'type': 'object'},
            'label_suggest': suggest_field
        }
        try:
            self.es.indices.delete(index=indexName)
        except es_exceptions.NotFoundError:
            pass
        self.es.indices.create(index=indexName, body=body)
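
generate_index above deletes a possibly missing index and ignores the NotFoundError before recreating it. An equivalent guard checks for the index first with indices.exists; this is only a sketch, and the index name and settings are placeholders:

from elasticsearch import Elasticsearch

es = Elasticsearch([{'host': 'localhost', 'port': 9200}])  # assumed local cluster

index_name = 'my-index'                          # hypothetical index
body = {'settings': {'number_of_replicas': 0}}   # minimal placeholder settings

if es.indices.exists(index=index_name):
    es.indices.delete(index=index_name)
es.indices.create(index=index_name, body=body)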