Python redis module: StrictRedis() example source code

The code examples below were extracted from open-source Python projects and show how redis.StrictRedis() is used in practice. A minimal connection sketch comes first, followed by the per-project examples.
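A minimal connection sketch up front (the host, port, and key names here are placeholders, not taken from any of the projects below):

import redis

# connect to a local server and round-trip a key
client = redis.StrictRedis(host='localhost', port=6379, db=0, socket_timeout=1)
client.ping()                                # raises redis.exceptions.ConnectionError if the server is unreachable
client.set('example:key', 'value', ex=60)    # store with a 60-second TTL
print(client.get('example:key'))             # b'value' (bytes, unless decode_responses=True is passed)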

Project: django-heartbeat    Author: pbs
def check(request):
    host = settings.CACHEOPS_REDIS.get('host', '')
    port = settings.CACHEOPS_REDIS.get('port', 0)
    db = settings.CACHEOPS_REDIS.get('db', 0)
    socket_timeout = settings.CACHEOPS_REDIS.get('socket_timeout')

    try:
        redis_con = redis.StrictRedis(
            host=host, port=port, db=db, socket_timeout=socket_timeout)
        ping = redis_con.ping()
    except NameError:
        return {'error': 'cannot import redis library'}
    except ConnectionError as e:
        return {'error': str(e)}

    return {
            'ping': ping,
            'version': redis_con.info().get('redis_version')
        }
Project: Url    Author: beiruan
def __init__(self, name, host='localhost', port=6379, db=0,
                 maxsize=0, lazy_limit=True, password=None, cluster_nodes=None):
        """
        Constructor for RedisQueue

        maxsize:    an integer that sets the upper-bound limit on the number of
                    items that can be placed in the queue.
        lazy_limit: because the redis queue is shared across instances, a lazy
                    size limit is used for better performance.
        """
        self.name = name
        if(cluster_nodes is not None):
            from rediscluster import StrictRedisCluster
            self.redis = StrictRedisCluster(startup_nodes=cluster_nodes)
        else:
            self.redis = redis.StrictRedis(host=host, port=port, db=db, password=password)
        self.maxsize = maxsize
        self.lazy_limit = lazy_limit
        self.last_qsize = 0
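A construction sketch for the queue above (the class name RedisQueue comes from its docstring; the connection values are placeholders):

queue = RedisQueue('crawler:urls', host='localhost', port=6379, db=0, maxsize=1000)
# or, against a Redis Cluster (requires the rediscluster package):
# queue = RedisQueue('crawler:urls', cluster_nodes=[{'host': '10.0.0.1', 'port': 7000}])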
Project: enteletaor    Author: cr0hn
def action_redis_server_connected(config):
    """
    Dump all redis information
    """
    log.warning("  - Trying to connect with redis server...")

    # Connection with redis
    con = redis.StrictRedis(host=config.target, port=config.port, db=config.db)

    log.error("Connected users to '%s':" % config.target)

    for c in con.client_list():

        # Skip local host connections
        client = c['addr']
        db = c['db']

        log.error("  - %s (DB: %s)" % (client, db))
Project: enteletaor    Author: cr0hn
def brute_redis(host, port=6379, user=None, password=None, db=0):

    try:
        redis.StrictRedis(host=host,
                          port=int(port),
                          socket_connect_timeout=1,
                          socket_timeout=1,
                          password=password,
                          db=db).ping()
        return True

    except redis.exceptions.ResponseError as e:
        if str(e).startswith("NOAUTH"):
            raise AuthRequired()
        else:
            return False
    except Exception:
        return False


# ----------------------------------------------------------------------
Project: bitrader    Author: jr-minnaar
def __init__(self, cache: bool = False, future: bool = True):
        if cache:
            redis_conn = redis.StrictRedis(host='redis')
            self.session = requests_cache.core.CachedSession(
                cache_name='api_cache',
                backend='redis', expire_after=60 * 60 * 24 * 30,
                allowable_codes=(200,),
                allowable_methods=('GET',),
                old_data_on_error=False,
                connection=redis_conn,
            )
        else:
            self.session = session()
        if future:
            self.future_session = FuturesSession(max_workers=10, session=self.session)
        self.url = self.url_template.format(resource='', token=self.token)
Project: lushi8    Author: ieiayaobb
def handle(self, *args, **options):
        redis_instance = redis.StrictRedis(host=REDIS_HOST, db=REDIS_DB, password=REDIS_PASSWORD)
        # redis_instance = redis.StrictRedis(host='127.0.0.1', db=7)
        for key in redis_instance.scan_iter("Chairman:*"):
            redis_instance.delete(key)

        fetcher = Fetcher()
        fetcher.fetch_cc()
        fetcher.fetch_douyu()
        fetcher.fetch_longzhu()
        fetcher.fetch_quanmin()
        fetcher.fetch_xiongmao()
        fetcher.fetch_zhanqi()
        fetcher.fetch_huya()

        for chairman in fetcher.chairmans:
            try:
                if chairman.is_valid():
                    chairman.save()
                else:
                    print(chairman.errors)
            except Exception as e:
                print(e)
Project: lushi8    Author: ieiayaobb
def refresh_rank():
    redis_instance = redis.StrictRedis(host=REDIS_HOST, db=REDIS_DB, password=REDIS_PASSWORD)
    for key in redis_instance.scan_iter("Chairman:*"):
        redis_instance.delete(key)

    fetcher = Fetcher()
    fetcher.fetch_cc()
    fetcher.fetch_douyu()
    fetcher.fetch_longzhu()
    fetcher.fetch_quanmin()
    fetcher.fetch_xiongmao()
    fetcher.fetch_zhanqi()
    fetcher.fetch_huya()

    for chairman in fetcher.chairmans:
        if chairman.is_valid():
            chairman.save()
        else:
            print(chairman.errors)
Project: Software-Architecture-with-Python    Author: PacktPublishing
def memoize(func, ttl=86400):
    """ A memory caching decorator """

    # Local redis as in-memory cache
    cache = StrictRedis(host='localhost', port=6379)

    def wrapper(*args, **kwargs):

        # Construct a unique cache filename
        key = unique_key(args[0], args[1])
        # Check if its in redis
        cached_data = cache.get(key)
        if cached_data is not None:
            print('=>from cache<=')
            return json.loads(cached_data)

        # Else calculate and store while putting a TTL
        result = func(*args, **kwargs)
        cache.set(key, json.dumps(result), ttl)

        return result

    return wrapper
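A usage sketch for the decorator above, assuming the module's unique_key helper builds a cache key from the first two positional arguments and a local Redis server is running; fetch_rates is a hypothetical function:

@memoize
def fetch_rates(base, symbol):
    # pretend this is an expensive call whose JSON-serializable result is worth caching
    return {'base': base, 'symbol': symbol, 'rate': 1.234}

fetch_rates('USD', 'EUR')   # computed, then stored in Redis under unique_key('USD', 'EUR') with the TTL
fetch_rates('USD', 'EUR')   # served from Redis (prints '=>from cache<=')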
Project: corvus-web-public    Author: eleme
def get_info(app, host, port):
        r = redis.StrictRedis(host, port, socket_timeout=0.1)
        cmd = '{} get maxmemory'.format(app.config['REDIS_CONFIG_CMD'])
        p = r.pipeline()
        p.info()
        p.execute_command(cmd)
        result = p.execute()
        info = result[0]

        if 'db0' in info:
            expires = info['db0']['expires']
            keys = info['db0']['keys']
        else:
            expires = 0
            keys = 0
        return {
            'memory': info['used_memory'],
            'maxmemory': int(result[1][1]),
            'connected_clients': info['connected_clients'],
            'total_commands_processed': info['total_commands_processed'],
            'total_keys': keys,
            'expires_keys': expires,
            'keyspace_misses': info['keyspace_misses'],
            'keyspace_hits': info['keyspace_hits'],
        }
Project: Stock-Visualizer    Author: saguo
def consume_messages(cls, **kwargs):
        def job(consumer_, redis_client_, redis_channel_):
            for msg in consumer_.poll():
                message = msg.value
                logger.info(ujson.loads(message))
                redis_client_.publish(redis_channel_, message)

        def shut_down(consumer_):
            consumer_.shut_down()

        # get consumer
        kafka_broker = kwargs.get(KAFKA_BROKER) or DEFAULT_KAFKA_BROKER
        kafka_topic = kwargs.get(KAFKA_OUTPUT_TOPIC) or DEFAULT_KAFKA_OUTPUT_TOPIC
        consumer = Consumer(kafka_broker, kafka_topic)

        # get redis
        redis_channel = kwargs.get(REDIS_CHANNEL) or DEFAULT_REDIS_CHANNEL
        redis_host = kwargs.get(REDIS_HOST) or DEFAULT_REDIS_HOST
        redis_port = kwargs.get(REDIS_PORT) or DEFAULT_REDIS_PORT
        redis_client = redis.StrictRedis(host=redis_host, port=redis_port)

        atexit.register(shut_down, consumer)

        scheduler = Scheduler(1, job, consumer, redis_client, redis_channel)
        scheduler.run()
Project: ChronosES    Author: belvedere-trading
def __init__(self, aggregateClass, **kwargs): #pylint: disable=W0613
        super(DefaultCoreProvider, self).__init__(aggregateClass)
        self.aggregateName = self.aggregateClass.__name__
        postgresUser, postgresPassword, postgresDatabase, postgresHost, postgresPort = GetPostgresConnectionDetails(self.infrastructureProvider)
        redisHost, redisPort, redisConfig = GetRedisConnectionDetails(self.infrastructureProvider)

        try:
            self.redisConnection = StrictRedis(host=redisHost, port=redisPort, **redisConfig)
        except Exception:
            EventLogger.LogExceptionAuto(self, 'Failed to initialize Notifier')
            raise
        self._notifier = self.infrastructureProvider.GetConfigurablePlugin(ConfigurablePlugin.Notifier, aggregateClass=self.aggregateClass, #pylint: disable=C0103
                                                                           redisConnection=self.redisConnection)

        self.logicConnection = PostgresLogicConnection(postgresUser, postgresPassword, postgresDatabase, postgresHost, postgresPort)
        self._logicStore = self.infrastructureProvider.GetConfigurablePlugin(ConfigurablePlugin.LogicStore, postgresConnection=self.logicConnection) #pylint: disable=C0103

        self.eventPersisterConnection = PostgresEventWriteConnection(self.aggregateName, postgresUser, postgresPassword, postgresDatabase, postgresHost, postgresPort)
        self._eventPersister = self.infrastructureProvider.GetConfigurablePlugin(ConfigurablePlugin.EventPersister, aggregateClass=self.aggregateClass, #pylint: disable=C0103
                                                                                 notifier=self._notifier, postgresConnection=self.eventPersisterConnection)

        self.eventReaderConnection = PostgresEventReadConnection(self.aggregateName, postgresUser, postgresPassword, postgresDatabase, postgresHost, postgresPort)
        self._eventReader = self.infrastructureProvider.GetConfigurablePlugin(ConfigurablePlugin.EventReader, aggregateClass=self.aggregateClass, #pylint: disable=C0103
                                                                              postgresConnection=self.eventReaderConnection)
Project: trading_package    Author: abrahamchaibi
def __init__(self, currency: Currency, qty: str = 0, min_fraction: Optional[str] = None,
                 max_fraction: Optional[str] = None) -> None:
        self.persistent_redis_server = StrictRedis(host='localhost', port=6379, db=1)
        self.currency = currency
        self.qty = Decimal(qty)

        try:
            self.min_fraction = min_fraction or PORTFOLIO_MAKEUP[currency.name][0]
        except (KeyError, IndexError):
            self.min_fraction = '0.0'
        self.min_fraction = Decimal(self.min_fraction)

        try:
            self.max_fraction = max_fraction or PORTFOLIO_MAKEUP[currency.name][1]
        except (KeyError, IndexError):
            self.max_fraction = '1.0'
        self.max_fraction = Decimal(self.max_fraction)
Project: open-source-feeds    Author: mhfowler
def main():
    redis_connection = StrictRedis(
        host=ENV_DICT.get('REDIS_HOST', 'localhost'),
        port=ENV_DICT.get('REDIS_PORT', 6379),
        db=ENV_DICT.get('REDIS_DB', 0),
        password=ENV_DICT.get('REDIS_PASSWORD')
    )
    interval = ENV_DICT.get('RQ_SCHEDULER_POLLING_INTERVAL', 60)
    verbose = ENV_DICT.get('RQ_SCHEDULER_VERBOSE_OUTPUT', False)
    burst = ENV_DICT.get('RQ_SCHEDULER_BURST_MODE', False)

    if verbose:
        log_level = 'DEBUG'
    else:
        log_level = 'INFO'
    setup_loghandlers(log_level)

    scheduler = Scheduler(connection=redis_connection, interval=interval)
    scheduler.run(burst=burst)
Project: skills-labeller    Author: workforce-data-initiative
def __init__(self,
                 host=None,
                 port=None,
                 cmd=" ".join([VW_CMD, VW_ARGS])):
        self.SKILL_CANDIDATES = "candidates" # backing for ordered importances
        self.TIMESTAMP = "timestamp" # string of last timestamp value
        self.REDIS = "redis"  # hostname of the Redis container on the service docker network
        self.cmd = cmd
        self.host = host
        self.port = port
        self.oracle = None

        command = None
        if not self.check_socket(host=self.host, port=self.port):
            command = self.cmd
        # Stand up/Connect to an instance of vowpal wabbit
        self.oracle = DaemonVWProcess(command=command,
                                      port=self.port,
                                      ip=self.host)

        self.redis_db = redis.StrictRedis(host=self.REDIS)  # defaults to redis:6379
Project: TAC-Airflow-Plugin    Author: vipul-tm
def get_conn(self):
        """
        Returns a redis connection object
        """
        conn_config = {
            "host": self.conn.host or 'localhost',
            "db": self.conn.schema or ''
        }

        if not self.conn.port:
            conn_config["port"] = 6379
        else:
            conn_config["port"] = int(self.conn.port)

        conn = StrictRedis(**conn_config)
        return conn

    # to set redis key
Project: redis-astra    Author: pilat
def __init__(self, pk=None, **kwargs):
        self._fields = dict()
        self._helpers = set()
        self._hash = {}  # Hash-object cache
        self._hash_loaded = False
        self._fields_loaded = False
        assert isinstance(self.database, redis.StrictRedis)

        if pk is None:
            raise ValueError('You must pass a pk for the object')
        self.pk = str(pk)  # Always convert to str for key-safe ops.

        # When the object is initialized with parameters, for example
        # user1 = UserObject(1, name='Username'), the fields are written to the
        # db immediately. When the object is initialized as user2 = UserObject(1),
        # nothing is read from the database until the data is first accessed.
        if kwargs:
            self._load_fields(**kwargs)
Project: opsweb    Author: wylok
def slow_redis():
    form = MyForm.myform()
    if form.submit_redis.data:
        for ip in redis_ips:
            Redis = redis.StrictRedis(host=ip, port=6379, db=0, socket_timeout=1)
            Redis.slowlog_reset()
            Redis.config_set("slowlog-max-len", 100)
            Redis.config_set("slowlog-log-slower-than", 200000)
    for ip in redis_ips:
        try:
            Redis = redis.StrictRedis(host=ip, port=6379, db=0, socket_timeout=1)
            results = Redis.slowlog_get(100)
        except:
            logging.error('%s Timeout reading from socket!' % ip)
            continue
        if results:
            flash("redis server:%s" %ip)
            flash("slow log len:%s" %Redis.slowlog_len())
            flash(results)
    return render_template('slow_redis_show.html',Main_Infos=g.main_infos,form = form)
Project: opsweb    Author: wylok
def _RC_Run(key, port, action):
    Tpyes = dict(hash="RC.hgetall(key)", list="RC.lrange(key,0,-1)", string="RC.get(key)",
                 zset="RC.zrange(key,0,-1,withscores=True)", set="RC.smembers(key)")
    if port == 6379:
        app = Flask(__name__)
        app.config.from_pyfile('../conf/redis.conf')
        nodes = app.config.get('NODES_PRODUCE')
        RC = RedisCluster(startup_nodes=nodes, decode_responses=True)
    else:
        RC = redis.StrictRedis(host='redis.service.baihe', port=port)
    T = RC.type(key)
    if T == 'none':
        flash('Key type: {0}'.format(T))
    else:
        if action == 'clear':
            RC.delete(key)
            flash('Key deleted!')
        return eval(Tpyes.get(T))
Project: nanobox-adapter-libcloud    Author: nanobox-io
def _find_server(self, driver, id):
        for server in driver.list_nodes():
            if server.name == id:
                return server

        r = redis.StrictRedis(host=os.getenv('DATA_REDIS_HOST'))
        status = r.get('%s:server:%s:status' % (self.id, id))

        if status:
            return Node(
                id = id,
                name = id,
                state = status,
                public_ips = [],
                private_ips = [],
                driver = driver,
                extra = {}
            )

    # Internal-only methods
Project: dHydra    Author: Emptyset110
def get_redis(self, config="redis.json"):
        import os
        if "/" in config:
            cfg = util.read_config(config)
        else:
            cfg = util.read_config(
                os.path.join(os.getcwd(), "config", config)
            )
        host = cfg["host"]
        port = cfg["port"]

        try:
            self.logger.info("Trying to connect to redis")
            self.redis = redis.StrictRedis(
                decode_responses=True,
                host=host,
                port=port
            )
            self.redis.client_list()
            return self.redis
        except:
            self.logger.warning("Failed to connect to redis")
            return False
Project: dd-trace-py    Author: DataDog
def patch():
    """Patch the instrumented methods

    This duplication doesn't look nice. The nicer alternative is to use an ObjectProxy on top
    of Redis and StrictRedis. However, it means that any "import redis.Redis" won't be instrumented.
    """
    if getattr(redis, '_datadog_patch', False):
        return
    setattr(redis, '_datadog_patch', True)

    _w = wrapt.wrap_function_wrapper
    _w('redis', 'StrictRedis.execute_command', traced_execute_command)
    _w('redis', 'StrictRedis.pipeline', traced_pipeline)
    _w('redis', 'Redis.pipeline', traced_pipeline)
    _w('redis.client', 'BasePipeline.execute', traced_execute_pipeline)
    _w('redis.client', 'BasePipeline.immediate_execute_command', traced_execute_command)
    Pin(service="redis", app="redis", app_type="db").onto(redis.StrictRedis)
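For context, wrapt.wrap_function_wrapper swaps the named attribute for a wrapper that receives the original callable together with its arguments. A standalone sketch of the same mechanism (the timing wrapper is hypothetical and not part of dd-trace-py):

import time
import wrapt
import redis

def timed(wrapped, instance, args, kwargs):
    # time the wrapped call, then delegate to it unchanged
    start = time.time()
    try:
        return wrapped(*args, **kwargs)
    finally:
        print('%s took %.6fs' % (wrapped.__name__, time.time() - start))

# after this, every StrictRedis command goes through timed() first
wrapt.wrap_function_wrapper('redis', 'StrictRedis.execute_command', timed)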
Project: tasker    Author: wavenator
def __init__(
        self,
        nodes,
    ):
        super().__init__()

        self.nodes = nodes

        self.connections = [
            redis.StrictRedis(
                host=node['host'],
                port=node['port'],
                password=node['password'],
                db=node['database'],
                retry_on_timeout=True,
                socket_keepalive=True,
                socket_connect_timeout=10,
                socket_timeout=60,
            )
            for node in nodes
        ]

        self.master_connection = self.connections[0]

        random.shuffle(self.connections)
Project: tasker    Author: wavenator
def __init__(
        self,
        host,
        port,
        password,
        database,
    ):
        super().__init__()

        self.host = host
        self.port = port
        self.password = password
        self.database = database

        self.connection = redis.StrictRedis(
            host=self.host,
            port=self.port,
            password=self.password,
            db=self.database,
            retry_on_timeout=True,
            socket_keepalive=True,
            socket_connect_timeout=10,
            socket_timeout=60,
        )
Project: og-miner    Author: opendns
def __init__(self, push, pull, redis_conf):
        super(MinerClient, self).__init__()

        print("Connecting to Redis cache {} ...".format(redis_conf))
        redis_host, redis_port, redis_db = redis_conf.split(":")
        self.redis = redis.StrictRedis(host=redis_host, port=int(redis_port), db=int(redis_db))
        self.redis.setnx('transaction', 0)
        # NOTE: Expiration times for pending/processed tasks in seconds.
        self.transaction_expiration = 60 * 60
        self.result_expiration = 60 * 10

        context = zmq.Context()

        print("Connecting to push socket '{}' ...".format(push))
        self.push = context.socket(zmq.PUSH)
        self.push.connect(push)

        print("Binding to pull socket '{}' ...".format(pull))
        self.pull = context.socket(zmq.PULL)
        self.pull.bind(pull)
Project: pyEfi    Author: lucid281
def __init__(self, type='sock',
                 hostOrSocket='/var/run/redis/redis.sock',
                 database=0, decode=True
                 ):
        """Setup redis connection here..."""
        try:
            if type == 'sock':
                self.redisDb = redis.StrictRedis(unix_socket_path=hostOrSocket,
                                                 decode_responses=decode, db=database)
            elif type == 'ip':
                self.redisDb = redis.StrictRedis(host=hostOrSocket, port=6379,
                                                 decode_responses=decode, db=database)
            else:
                ttyP(7, "redis conf did not work. exiting...")
                exit(1)
        finally:
            # this always prints, helpful debugging
            endc = '\033[0m'
            ttyP(4, "  redis @ " + endc + hostOrSocket)
Project: SkySpyWatch    Author: nstarpost
def consume():
    """Creates mongo, redis, and rabbitmq connections; consumes queue."""
    logger.debug("Consume started")
    redis_host = 'localhost'
    redis_port = 6379
    # connect to mongodb
    client = MongoClient()
    dbmongo = client.rt_flights_test
    # connect to redis
    r = redis.StrictRedis(host=redis_host, port=redis_port, db=0, decode_responses=True)
    # connect to rabbitmq and create queue
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    task_queue = channel.queue_declare(queue=queue_name, durable=True)
    channel.basic_qos(prefetch_count=1)
    # start pulling data off the queue
    channel.basic_consume(lambda ch, method, properties, body: callback(ch, method, properties, body, r, dbmongo), queue=queue_name)
    channel.start_consuming()
    client.close()
    return 0
Project: incubator-airflow-old    Author: apache
def get_conn(self):
        """
        Returns a Redis connection.
        """
        if not self.client:
            self.log.debug(
                'generating Redis client for conn_id "%s" on %s:%s:%s',
                self.redis_conn_id, self.host, self.port, self.db
            )
            try:
                self.client = StrictRedis(
                    host=self.host,
                    port=self.port,
                    password=self.password,
                    db=self.db)
            except Exception as general_error:
                raise AirflowException(
                    'Failed to create Redis client, error: {error}'.format(
                        error=str(general_error)
                    )
                )

        return self.client
Project: juicer    Author: eubr-bigsea
def start(self):
        signal.signal(signal.SIGTERM, self._terminate_minions)
        log.info(_('Starting master process. Reading "start" queue'))

        parsed_url = urlparse.urlparse(
            self.config['juicer']['servers']['redis_url'])
        redis_conn = redis.StrictRedis(host=parsed_url.hostname,
                                       port=parsed_url.port)

        # Start pending minions
        apps = [q.split('_')[-1] for q in redis_conn.keys('queue_app_*')]
        self.state_control = StateControlRedis(redis_conn)

        for app in apps:
            log.warn(_('Starting pending app {}').format(app))
            self._start_minion(app, app, self.state_control, self.platform)
        while True:
            self.read_start_queue(redis_conn)

    # noinspection PyMethodMayBeStatic
Project: yarnitor    Author: maxpoint
def main():
    """Creates a redis client, a YARN ResourceManager REST API client, and a YARN
    poller that puts information about the YARN cluster and its applications into
    redis on a timed interval.
    """
    log_level = os.getenv('LOG_LEVEL', 'INFO')
    logging.basicConfig(level=getattr(logging, log_level))

    host, port = os.environ['REDIS_ENDPOINT'].split(":")
    redis_client = redis.StrictRedis(host=host, port=port)
    yarn_handler = YARNHandler(os.environ['YARN_ENDPOINT'])

    ym = YARNPoller(redis_client, yarn_handler)
    ym.register_handler("SPARK", SparkHandler)
    ym.register_handler("MAPREDUCE", MapredHandler)
    ym.register_handler("MAPRED", MapredHandler)
    ym.loop(int(os.environ["YARN_POLL_SLEEP"]))
Project: dash-ticker    Author: chaeplin
def check_redis():
    if HOST_ROLE == 'MASTER':
        SETINEL_HOST = MASTER_SETINEL_HOST
        REDIS_MASTER = MASTER_REDIS_MASTER

    else:
        SETINEL_HOST = SLAVE_SETINEL_HOST
        REDIS_MASTER = SLAVE_REDIS_MASTER      

    s = redis.StrictRedis(host=SETINEL_HOST, port=26379, socket_timeout=0.1)
    try:
        h = s.execute_command("SENTINEL get-master-addr-by-name mymaster")[0].decode("utf-8")
        print(h)
        if h == REDIS_MASTER:
            print('Other host is redis master')
            sys.exit()

        else:
            pass

    except Exception as e:
        print(e.args[0])
        sys.exit()
Project: socialhome    Author: jaywink
def get_redis_connection():
    return redis.StrictRedis(
        host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, password=settings.REDIS_PASSWORD,
    )
Project: dsq    Author: baverman
def store(request):
    cl = redis.StrictRedis()
    cl.flushdb()
    return QueueStore(cl)
Project: dsq    Author: baverman
def app(request):
    cl = redis.StrictRedis()
    cl.flushdb()
    return Application(Manager(QueueStore(cl), ResultStore(cl)))
Project: dsq    Author: baverman
def manager(request):
    cl = redis.StrictRedis()
    cl.flushdb()
    return Manager(QueueStore(cl), ResultStore(cl))
Project: dsq    Author: baverman
def store(request):
    cl = redis.StrictRedis()
    cl.flushdb()
    return ResultStore(cl)
Project: openedoo    Author: openedoo
def set_redis(key,data,secnd):
    try:
        r = redis.StrictRedis()
        data = json.dumps(data)
        dataset = (str(key),str(data))
        a = r.set('%s' %key,'%s' %data)
        #rdis.bgsave()
        b = r.expire('%s' %key, secnd)
        return a
    except Exception:
        return False
Project: openedoo    Author: openedoo
def get_redis(key):
    try:
        r = redis.StrictRedis()
        data = r.get(key)
        return json.loads(data)
    except Exception:
        return False
Project: headers    Author: oshp
def __init__(self, settings):
        self.r = redis.StrictRedis(
            host=settings['redis']['host'],
            port=settings['redis']['port'],
            db=settings['redis']['db'])
Project: FRG-Crowdsourcing    Author: 97amarnathk
def __init__(self, app=None):
        self.app = app
        self.master = StrictRedis()
        self.slave = self.master
        if app is not None: # pragma: no cover
            self.init_app(app)
Project: FRG-Crowdsourcing    Author: 97amarnathk
def setUp(self):
        super(TestNews, self).setUp()
        self.connection = StrictRedis()
        self.connection.flushall()
Project: FRG-Crowdsourcing    Author: 97amarnathk
def setUp(self):
        self.connection = StrictRedis()
        self.connection.flushall()
        self.guard = ContributionsGuard(self.connection)
        self.anon_user = {'user_id': None, 'user_ip': '127.0.0.1'}
        self.auth_user = {'user_id': 33, 'user_ip': None}
        self.task = Task(id=22)
Project: FRG-Crowdsourcing    Author: 97amarnathk
def setUp(self):
        super(TestMaintenance, self).setUp()
        self.connection = StrictRedis()
        self.connection.flushall()
Project: FRG-Crowdsourcing    Author: 97amarnathk
def setUp(self):
        super(TestWebHooks, self).setUp()
        self.connection = StrictRedis()
        self.connection.flushall()
        self.project = ProjectFactory.create()
        self.webhook_payload = dict(project_id=self.project.id,
                                    project_short_name=self.project.short_name)
Project: lopocs    Author: Oslandia
def init(cls):
        cls.r = redis.StrictRedis(host='127.0.0.1',
                                  port=Config.STATS_SERVER_PORT, db=0)
        cls.r.set('rate', str(0.0).encode('utf-8'))
        cls.r.set('npoints', str(0).encode('utf-8'))
        cls.r.set('time_msec', str(0).encode('utf-8'))
Project: pykit    Author: baishancloud
def get_client(ip_port):

    ip_port = normalize_ip_port(ip_port)

    pid = os.getpid()

    with _lock:
        o = _pid_client[ip_port]

        if pid not in o:
            o[pid] = redis.StrictRedis(*ip_port)

    return _pid_client[ip_port][pid]
Project: gemstone    Author: vladcalin
def _get_connection(self):
        conn = getattr(_thread_local, "_redisconn", None)
        if conn:
            return conn

        conn = redis.StrictRedis(host=self.host, port=self.port, db=self.db)
        setattr(_thread_local, "_redisconn", conn)
        return conn
Project: gemstone    Author: vladcalin
def get_redis_connection(self):
        return redis.StrictRedis(connection_pool=self.connection_pool)
Project: graphql-python-subscriptions    Author: hballard
def __init__(self, host='localhost', port=6379, *args, **kwargs):
        redis.connection.socket = gevent.socket
        self.redis = redis.StrictRedis(host, port, *args, **kwargs)
        self.pubsub = self.redis.pubsub()
        self.subscriptions = {}
        self.sub_id_counter = 0
        self.greenlet = None