Python settings module: LOG_LEVEL example source code

The following 3 code examples, extracted from open-source Python projects, show how to use settings.LOG_LEVEL.
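
Before the project examples, here is a minimal, self-contained sketch of the shared pattern: a settings module exposes a LOG_LEVEL constant, and callers pass it to logging. The settings stand-in, the logger name, and the chosen level below are assumptions for illustration, not taken from the projects.

import logging

class settings:                       # illustrative stand-in for `import settings`
    LOG_LEVEL = logging.INFO          # assumed value; could equally be logging.DEBUG
    LOG_PATH = "/var/log/myapp"       # counterpart of settings.LOG_PATH used below

logger = logging.getLogger("myapp")
logger.setLevel(settings.LOG_LEVEL)           # apply the global level to this logger
logger.addHandler(logging.StreamHandler())    # send records to stderr
logger.info("logging configured at %s",
            logging.getLevelName(logger.getEffectiveLevel()))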

Project: intel-manager-for-lustre    Author: intel-hpdd
def log_register(log_name):
    """
    Acquire a logger object, initialized with the global level and output options

    :param log_name: logger name (as in `logging.getLogger`), will be prefixed to log lines
    :return: A `logging.Logger` instance.
    """
    logger = logging.getLogger(log_name)
    logger.setLevel(settings.LOG_LEVEL)

    if _enable_stdout:
        _add_stream_handler(logger)
    if _log_filename:
        _add_file_handler(logger)
    if not _log_filename and not _enable_stdout:
        # Prevent 'No handlers could be found' spam
        logger.addHandler(MemoryHandler(0))

    _loggers.add(logger)
    return logger
Project: intel-manager-for-lustre    Author: intel-hpdd
def custom_log_register(log_name, filename=None, use_formatter=True):
    """Create another custom log handler to an optional file

    logger can have a file handler, or no handler at all.  In the second case
    you can then add your own handlers after the call to this method.

    Uses settings.LOG_LEVEL

    Be aware that the user that calls this method will own the log file.  Or the file
    may be created in some other manner owned by anyone.
    If any other process then tries to register this same log file it may not be
    able to read/write, and an IOError will be raised.  That is left uncaught, because
    it's a siutation you shouldn't leave unchecked in your calling code.
    """

    logger = logging.getLogger(log_name)
    logger.setLevel(settings.LOG_LEVEL)

    # If a filename is requested for this logger,
    # make sure it will be created in the right place.
    if filename:
        if not filename.startswith(settings.LOG_PATH):
            filename = os.path.join(settings.LOG_PATH, filename)

        # NB: this will fail if the permissions prevent opening the file.
        # Generally just make sure the user (process) creating the file is
        # the same one that will write to it.
        _add_file_handler(logger, filename, use_formatter)

    _loggers.add(logger)
    return logger
Project: LearnProgrammingBot    Author: Aurora0001
def run_bot(args):
    logging.basicConfig(filename=LOGFILE_URI, level=LOG_LEVEL, format=LOG_FORMAT)

    logging.info('Connecting to database {}'.format(DATABASE_URI))
    Session = connect_to_database(DATABASE_URI)
    logging.info('Database connection OK')

    session = Session()
    data = session.query(model.Corpus).all()

    data_values = [col.title + ' ' + col.text for col in data]
    data_targets = [col.category for col in data]

    logging.info('Training classifier with {} values'.format(len(data_values)))
    classifier = Classifier(data_values, data_targets)
    logging.info('Classifier trained')

    logging.info('Connecting to reddit...')
    reddit = get_reddit_client()

    logging.info('Authorizing...')
    access_information = reddit.get_access_information(CLIENT_ACCESSCODE)
    reddit.set_access_credentials(**access_information)
    logging.info('Logged in successfully.')

    for message in praw.helpers.submission_stream(reddit, SUBREDDIT, limit=5, verbosity=0):
        message_text = message.title + ' ' + message.selftext
        pred = classifier.classify(message_text)[0]
        if pred in responses:
            if args.supervised and input('Classify {} as {}? (y/n) '.format(message.id, pred)).lower() != 'y':
                continue

            try:
                message.add_comment(responses[pred] + post_signature)
            except praw.errors.RateLimitExceeded:
                # TODO:
                # Ideally, errors should be handled properly. Perhaps a deque could be used
                # to store the posts which failed, so they could be retried every minute (or so).
                logging.error('Rate limit exceeded, cannot post to thread {}'.format(message.title))
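
The TODO comment hints at one way to handle the rate limit: keep the failed posts in a deque and retry them periodically. A rough sketch of that idea follows; the function, its parameters, and the retry interval are hypothetical and not part of LearnProgrammingBot.

import time
from collections import deque

failed_posts = deque()   # (message, reply_text) pairs that hit the rate limit

def retry_failed(post_reply, interval=60):
    """Retry queued replies every `interval` seconds until the deque drains."""
    while failed_posts:
        message, reply_text = failed_posts.popleft()
        try:
            post_reply(message, reply_text)      # e.g. message.add_comment(reply_text)
        except Exception:                        # e.g. praw.errors.RateLimitExceeded
            failed_posts.append((message, reply_text))  # re-queue for the next pass
            time.sleep(interval)                 # back off before trying again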