Python redis module: delete() code examples

We extracted the following 8 code examples from open-source Python projects to illustrate how to use redis.delete().
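Before the project samples, a minimal sketch of the call itself: delete() takes one or more key names and returns the number of keys that were actually removed. The connection settings below are illustrative assumptions, not taken from any project in this list.

import redis  # the redis-py client

# Minimal sketch; host/port/db are assumed defaults.
r = redis.StrictRedis(host='localhost', port=6379, db=0)

r.set('session:42', 'payload')
removed = r.delete('session:42', 'session:missing')
print(removed)  # 1 -- only keys that existed are counted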

Project: dati-ckan-docker    Author: italia    | Project source | File source
def gather_stage(harvester, job):
    '''Calls the harvester's gather_stage, returning harvest object ids, with
    some error handling.

    This is split off from gather_callback so that tests can call it without
    dealing with queue stuff.
    '''
    job.gather_started = datetime.datetime.utcnow()

    try:
        harvest_object_ids = harvester.gather_stage(job)
    except (Exception, KeyboardInterrupt):
        harvest_objects = model.Session.query(HarvestObject).filter_by(
            harvest_job_id=job.id
        )
        for harvest_object in harvest_objects:
            model.Session.delete(harvest_object)
        model.Session.commit()
        raise
    finally:
        job.gather_finished = datetime.datetime.utcnow()
        job.save()
    return harvest_object_ids
Project: TweenRoBot    Author: ThisIsAmir    | Project source | File source
def kick(m):
    if m.from_user.id == 223404066:
        redis.delete('banlist')
        bot.send_message(m.chat.id, '<code>Cleaned :(</code>', parse_mode='HTML')
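delete() is a no-op on missing keys, so the handler above doesn't have to check whether 'banlist' exists first. Its return value reports what happened, as in this one-line sketch (client named redis, as in the snippet):

removed = redis.delete('banlist')  # 1 if the ban list existed, 0 otherwise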



Project: dati-ckan-docker    Author: italia    | Project source | File source
def resubmit_jobs():
    '''
    Examines the fetch and gather queues for items that are suspiciously old.
    These are removed from the queues and placed back on them afresh, to
    ensure the fetch & gather consumers are triggered to process them.
    '''
    if config.get('ckan.harvest.mq.type') != 'redis':
        return
    redis = get_connection()

    # fetch queue
    harvest_object_pending = redis.keys(get_fetch_routing_key() + ':*')
    for key in harvest_object_pending:
        date_of_key = datetime.datetime.strptime(redis.get(key),
                                                 "%Y-%m-%d %H:%M:%S.%f")
        # 3 minutes max for fetch and import; total_seconds() avoids the
        # day-wrapping of timedelta.seconds
        if (datetime.datetime.now() - date_of_key).total_seconds() > 180:
            redis.rpush(get_fetch_routing_key(),
                json.dumps({'harvest_object_id': key.split(':')[-1]})
            )
            redis.delete(key)

    # gather queue
    harvest_jobs_pending = redis.keys(get_gather_routing_key() + ':*')
    for key in harvest_jobs_pending:
        date_of_key = datetime.datetime.strptime(redis.get(key),
                                                 "%Y-%m-%d %H:%M:%S.%f")
        # 2 hours max for a gather (7200 seconds)
        if (datetime.datetime.now() - date_of_key).total_seconds() > 7200:
            redis.rpush(get_gather_routing_key(),
                json.dumps({'harvest_job_id': key.split(':')[-1]})
            )
            redis.delete(key)
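resubmit_jobs issues the RPUSH and the DELETE as two separate round trips, so a crash between them would requeue the item while leaving its marker key behind. A hedged sketch of the same step wrapped in a redis-py pipeline (MULTI/EXEC) so the pair applies atomically; routing_key, key and payload stand in for the loop variables above:

def requeue_atomically(redis, routing_key, key, payload):
    # Sketch only, not the project's code: queue the item and drop its
    # pending-marker key in a single MULTI/EXEC transaction.
    pipe = redis.pipeline(transaction=True)
    pipe.rpush(routing_key, payload)
    pipe.delete(key)
    pipe.execute()  # both commands apply together, or neither does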
Project: dati-ckan-docker    Author: italia    | Project source | File source
def basic_ack(self, message):
        # Deleting the message's persistence marker is the acknowledgement.
        self.redis.delete(self.persistance_key(message))
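Here acknowledging a message is nothing more than deleting its persistence marker; if the consumer dies before reaching basic_ack, the marker survives and resubmit_jobs (above) eventually requeues the item. A sketch of the same ack, with the "<routing key>:<id>" layout inferred from resubmit_jobs rather than taken from this class:

def basic_ack_sketch(redis, routing_key, message_id):
    # Assumed key layout matching what resubmit_jobs scans for with
    # redis.keys(routing_key + ':*').
    redis.delete(routing_key + ':' + message_id)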
Project: spiders    Author: poodarchu    | Project source | File source
def del_all(redis=r):
    keys = redis.keys('*')
    for k in keys:
        print('Deleting:', k, 'result is', redis.delete(k))
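del_all works, but KEYS '*' is a blocking command that walks the entire keyspace in one go. A sketch of the same cleanup using the incremental SCAN cursor instead, assuming r is a redis-py client as in the snippet above:

def del_all_scan(r, pattern='*'):
    # Sketch: scan_iter pages through keys with SCAN, so the server
    # stays responsive even on large databases.
    deleted = 0
    for key in r.scan_iter(match=pattern, count=500):
        deleted += r.delete(key)
    return deleted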
Project: ray    Author: ray-project    | Project source | File source
def _clean_up_entries_from_shard(self, object_ids, task_ids, shard_index):
        redis = self.state.redis_clients[shard_index]
        # Clean up (in the future, save) entries for non-empty objects.
        object_ids_locs = set()
        object_ids_infos = set()
        for object_id in object_ids:
            # OL.
            obj_loc = redis.zrange(OBJECT_LOCATION_PREFIX + object_id, 0, -1)
            if obj_loc:
                object_ids_locs.add(object_id)
            # OI.
            obj_info = redis.hgetall(OBJECT_INFO_PREFIX + object_id)
            if obj_info:
                object_ids_infos.add(object_id)

        # Form the redis keys to delete.
        keys = [TASK_TABLE_PREFIX + k for k in task_ids]
        keys.extend([OBJECT_LOCATION_PREFIX + k for k in object_ids_locs])
        keys.extend([OBJECT_INFO_PREFIX + k for k in object_ids_infos])

        if not keys:
            return
        # Remove with best effort.
        num_deleted = redis.delete(*keys)
        log.info(
            "Removed {} dead redis entries of the driver from redis shard {}.".
            format(num_deleted, shard_index))
        if num_deleted != len(keys):
            log.warning(
                "Failed to remove {} relevant redis entries"
                " from redis shard {}.".format(len(keys) - num_deleted))
Project: ray    Author: ray-project    | Project source | File source
def _entries_for_driver_in_shard(self, driver_id, redis_shard_index):
        """Collect IDs of control-state entries for a driver from a shard.

        Args:
            driver_id: The ID of the driver.
            redis_shard_index: The index of the Redis shard to query.

        Returns:
            Lists of IDs: (returned_object_ids, task_ids, put_objects). The
                first two are relevant to the driver and are safe to delete.
                The last contains all "put" objects in this redis shard; each
                element is an (object_id, corresponding task_id) pair.
        """
        # TODO(zongheng): consider adding save & restore functionalities.
        redis = self.state.redis_clients[redis_shard_index]
        task_table_infos = {}  # task id -> TaskInfo messages

        # Scan the task table & filter to get the list of tasks that belong
        # to this driver.  Use a cursor so as not to block the redis shards.
        for key in redis.scan_iter(match=TASK_TABLE_PREFIX + b"*"):
            entry = redis.hgetall(key)
            task_info = TaskInfo.GetRootAsTaskInfo(entry[b"TaskSpec"], 0)
            if driver_id != task_info.DriverId():
                # Ignore tasks that aren't from this driver.
                continue
            task_table_infos[task_info.TaskId()] = task_info

        # Get the list of objects returned by these tasks.  Note these might
        # not belong to this redis shard.
        returned_object_ids = []
        for task_info in task_table_infos.values():
            returned_object_ids.extend([
                task_info.Returns(i) for i in range(task_info.ReturnsLength())
            ])

        # Also record all the ray.put()'d objects.
        put_objects = []
        for key in redis.scan_iter(match=OBJECT_INFO_PREFIX + b"*"):
            entry = redis.hgetall(key)
            if entry[b"is_put"] == "0":
                continue
            object_id = key.split(OBJECT_INFO_PREFIX)[1]
            task_id = entry[b"task"]
            put_objects.append((object_id, task_id))

        return returned_object_ids, list(task_table_infos.keys()), put_objects
Project: rosie-ci    Author: adafruit    | Project source | File source
def travis():
    signature = base64.b64decode(request.headers.get('Signature'))
    try:
        public_key = _get_travis_public_key()
    except requests.Timeout:
        print("Timed out when attempting to retrieve Travis CI public key")
        abort(500)
    except requests.RequestException as e:
        print("Failed to retrieve Travis CI public key")
        abort(500)
    try:
        check_authorized(signature, public_key, request.form["payload"])
    except SignatureError:
        abort(401)
    data = json.loads(request.form["payload"])

    repo = data["repository"]["owner_name"] + "/" + data["repository"]["name"]
    build_number = data["id"]
    sha = data["commit"]
    if data["type"] == "pull_request":
        sha = data["head_commit"]
    tag = None
    if data["type"] == "push" and data["tag"] != None:
        tag = data["tag"]
    print(data)

    key = sha
    if tag is not None:
        key = tag

    upload_lock = "upload-lock:" + sha

    if data["state"] in ("started", ):
        print("travis started", key)
        # Handle pulls differently.
        if data["pull_request"]:
            load_code.delay(repo, "pull/" + str(data["pull_request_number"]) + "/head")
        elif data["tag"]:
            load_code.delay(repo, "refs/tags/" + tag)
        else:
            load_code.delay(repo, "refs/heads/" + data["branch"])
        redis.setex(upload_lock, 20 * 60, "locked")
        set_status(repo, sha, "pending", data["build_url"], "Waiting on Travis to complete.")
    elif data["state"] in ("passed", "failed"):
        print("travis finished")
        key = repo + "/" + key
        set_status(repo, sha, "pending", "https://rosie-ci.ngrok.io/log/" + key, "Queueing Rosie test.")
        redis.delete(upload_lock)
        test_commit(repo, sha, tag)
    elif data["state"] is ("cancelled", ):
        print("travis cancelled")
        redis.delete(upload_lock)
        set_status(repo, sha, "error", data["build_url"], "Travis cancelled.")
    elif data["status"] is None:
        set_status(repo, sha, "error", data["build_url"], "Travis error.")
    else:
        print("unhandled state:", data["state"])
        print(data)
    return jsonify({'status': 'received'})
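The webhook pairs redis.setex (an upload lock with a 20-minute TTL as a safety net) with redis.delete (an early release once Travis reports a terminal state). A condensed sketch of that expiring-lock pattern; the module-level client and function names are invented for illustration:

import redis

r = redis.StrictRedis()  # connection details are an assumption

def acquire_upload_lock(sha, ttl=20 * 60):
    # The TTL guarantees the lock clears even if the release path never runs.
    r.setex('upload-lock:' + sha, ttl, 'locked')

def release_upload_lock(sha):
    # Explicit delete releases the lock early, as the handler does on
    # "passed"/"failed"/"cancelled".
    r.delete('upload-lock:' + sha)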