我们从Python开源项目中,提取了以下39个代码示例,用于说明如何使用oslo_log.log.register_options()。
def setup_log():
    """Configure oslo.log for the Ironic callback, honoring its overrides."""
    logging.register_options(CONF)
    cb_conf = parse_callback_config()
    kwargs = {'args': [], 'project': DOMAIN, 'version': VERSION}
    if cb_conf['ironic_config']:
        kwargs['default_config_files'] = [cb_conf['ironic_config']]
    CONF(**kwargs)
    # Flip the journal/syslog switches only when the callback asked for them.
    for flag in ('use_journal', 'use_syslog'):
        if cb_conf[flag]:
            CONF.set_override(flag, True)
    if cb_conf['ironic_log_file']:
        CONF.set_override("log_file", cb_conf['ironic_log_file'])
    logging.setup(CONF, DOMAIN)
def prepare_service(argv=None):
    """Register logging options, quiet noisy libraries and parse CLI args.

    :param argv: list of command-line arguments (defaults to an empty list)
    """
    # NOTE: the original used a mutable default (argv=[]), which is shared
    # across calls; argv=None with a fallback is the safe equivalent.
    if argv is None:
        argv = []
    log.register_options(cfg.CONF)
    # 'qpid.messaging=INFO' was garbled in the original listing
    # ('qpid.messagregister_optionsing=INFO').
    log.set_defaults(default_log_levels=['amqp=WARN',
                                         'amqplib=WARN',
                                         'qpid.messaging=INFO',
                                         'oslo.messaging=INFO',
                                         'sqlalchemy=WARN',
                                         'keystoneclient=INFO',
                                         'stevedore=INFO',
                                         'eventlet.wsgi.server=WARN',
                                         'iso8601=WARN',
                                         'paramiko=WARN',
                                         'requests=WARN',
                                         'neutronclient=WARN',
                                         'glanceclient=WARN',
                                         'iotronic.openstack.common=WARN',
                                         'urllib3.connectionpool=WARN',
                                         ])
    config.parse_args(argv)
    log.setup(cfg.CONF, 'iotronic')
def main():
    """Entry point for the trio2o-db-manage command."""
    core.initialize()
    logging.register_options(CONF)
    logging.setup(CONF, 'trio2o-db-manage')
    CONF.register_cli_opt(command_opt)
    pkg_version = pbr.version.VersionInfo('trio2o')
    try:
        CONF(sys.argv[1:],
             project='trio2o',
             prog='trio2o-db-manage',
             version=pkg_version.version_string())
    except RuntimeError as err:
        sys.exit("ERROR: %s" % err)
    try:
        CONF.command.func()
    except Exception as err:
        sys.exit("ERROR: %s" % err)
def prepare_service(argv=None, config_files=None, share=False):
    """Build and return a fully parsed panko ConfigOpts instance."""
    conf = cfg.ConfigOpts()
    for grp, options in opts.list_opts():
        # cfg treats the DEFAULT section as group=None.
        target_group = None if grp == "DEFAULT" else grp
        conf.register_opts(list(options), group=target_group)
    db_options.set_defaults(conf)
    if not share:
        defaults.set_cors_middleware_defaults()
    oslo_i18n.enable_lazy()
    log.register_options(conf)
    if argv is None:
        argv = sys.argv
    conf(argv[1:],
         project='panko',
         validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)
    if not share:
        log.setup(conf, 'panko')
    # NOTE(liusheng): guru cannot run with service under apache daemon, so when
    # panko-api running with mod_wsgi, the argv is [], we don't start guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    return conf
def __init__(self):
    """Bootstrap the Lightning-rod service: logging, signals, board, WAMP."""
    logging.register_options(CONF)
    DOMAIN = "s4t-lightning-rod"
    CONF(project='iotronic')
    logging.setup(CONF, DOMAIN)
    # Install a SIGINT handler so Ctrl-C triggers a clean shutdown.
    signal.signal(signal.SIGINT, self.stop_handler)
    LogoLR()
    # The Board instance is shared module-wide through this global.
    global board
    board = Board()
    LOG.info('Info:')
    LOG.info(' - Logs: /var/log/s4t-lightning-rod.log')
    current_time = board.getTimestamp()
    LOG.info(" - Current time: " + current_time)
    # Start the WAMP connection manager using the board's transport config.
    self.w = WampManager(board.wamp_config)
    self.w.start()
def set_default_for_default_log_levels():
    """Adjust oslo.log's default_log_levels default for Armada.

    Armada uses logging packages that other OpenStack services do not,
    so their default levels are appended here.  This function needs to
    be called before CONF().
    """
    log.register_options(CONF)
    log.set_defaults(
        default_log_levels=log.get_default_log_levels() +
        ['kubernetes.client.rest=INFO'])
def main():
    """Entry point for the bilean-manage command."""
    logging.register_options(CONF)
    logging.setup(CONF, 'bilean-manage')
    CONF.register_cli_opt(command_opt)
    try:
        conf_files = cfg.find_config_files('bilean', 'bilean-engine')
        CONF(sys.argv[1:],
             project='bilean',
             prog='bilean-manage',
             version=version.version_info.version_string(),
             default_config_files=conf_files)
    except RuntimeError as err:
        sys.exit("ERROR: %s" % err)
    try:
        CONF.command.func()
    except Exception as err:
        sys.exit("ERROR: %s" % err)
def main():
    """Start the Bilean REST API WSGI server."""
    try:
        logging.register_options(cfg.CONF)
        cfg.CONF(project='bilean', prog='bilean-api',
                 version=version.version_info.version_string())
        logging.setup(cfg.CONF, 'bilean-api')
        messaging.setup()
        app = config.load_paste_app()
        host = cfg.CONF.bilean_api.bind_host
        port = cfg.CONF.bilean_api.bind_port
        LOG.info(_LI('Starting Bilean ReST API on %(host)s:%(port)s'),
                 {'host': host, 'port': port})
        server = wsgi.Server('bilean-api', cfg.CONF.bilean_api)
        server.start(app, default_port=port)
        # Tell systemd (if present) that startup is complete.
        systemd.notify_once()
        server.wait()
    except RuntimeError as err:
        sys.exit("ERROR: %s" % six.text_type(err))
def init_config_and_logging(opts):
    """Register the given options, parse config and enable logging."""
    conf = cfg.CONF
    conf.register_cli_opts(opts)
    conf.register_opts(opts)
    logging.register_options(conf)
    logging.set_defaults()
    try:
        conf(project='performa')
        validate_required_opts(conf, opts)
    except cfg.RequiredOptError as err:
        print('Error: %s' % err)
        conf.print_usage()
        exit(1)
    logging.setup(conf, 'performa')
    LOG.info('Logging enabled')
    # Dump the effective option values at DEBUG for troubleshooting.
    conf.log_opt_values(LOG, std_logging.DEBUG)
def prepare(CONF):
    """Prepare config options.

    Registers logging options exactly once (guarded by the module-level
    ``_initialized`` flag), loads the project's config file and sets up
    logging.  Any failure is logged rather than propagated.
    """
    global _initialized
    try:
        if _initialized is False:
            logging.register_options(CONF)
            _initialized = True
        # Locate etc/<DOMAIN>.cfg three directories above this file.
        possible_topdir = os.path.normpath(
            os.path.join(os.path.abspath(__file__),
                         os.pardir, os.pardir, os.pardir))
        conf_file = os.path.join(possible_topdir, 'etc', DOMAIN + '.cfg')
        # NOTE: the original passed "[conf_file] or None"; a one-element
        # list literal is always truthy, so the "or None" was dead code.
        CONF([], project=DOMAIN,
             default_config_files=[conf_file],
             validate_default_values=True)
        logging.setup(CONF, DOMAIN)
    except Exception as ex:
        # Lazy %-style args instead of eager string interpolation.
        LOG.error("Preparation failed! %s", ex)
def init_conf(log_file="valet.log", args=None, grp2opt=None, cli_opts=None,
              default_config_files=None):
    """Register Valet's option groups, load config and set up the logger."""
    CONF.log_file = log_file
    logging.register_options(CONF)
    # Built-in option groups; callers may extend them via grp2opt.
    groups = {server_group: server_opts,
              music_group: music_opts,
              identity_group: identity_opts,
              messaging_group: messaging_opts}
    if grp2opt:
        groups.update(grp2opt)
    _register_conf(groups, cli_opts)
    load_conf(args=args, default_files=default_config_files)
    _set_logger()
def prepare_log(service_name):
    """Configure logging for a rock service.

    :param service_name: 'rock-mon' or 'rock-engine'
    :raises exceptions.InvalidService: for any other service name
    """
    log.register_options(CONF)
    CONF(default_config_files=['/etc/rock/rock.ini'])
    CONF.set_default('log_dir', '/var/log/rock')
    rock_mon_log_file = getattr(CONF, 'rock_mon_log_file', 'rock-mon.log')
    rock_engine_log_file = getattr(CONF, 'rock_engine_log_file',
                                   'rock-engine.log')
    if service_name == 'rock-mon':
        CONF.set_override('log_file', override=rock_mon_log_file)
    elif service_name == 'rock-engine':
        CONF.set_override('log_file', override=rock_engine_log_file)
    else:
        raise exceptions.InvalidService(service_name=service_name)
    # makedirs(exist_ok=True) avoids the exists()+mkdir TOCTOU race and
    # also creates missing parent directories (os.mkdir handled neither).
    os.makedirs(CONF.log_dir, exist_ok=True)
    log.setup(CONF, service_name)
def parse_args(argv, default_config_files=None, configure_db=True,
               init_rpc=True):
    """Parse CLI/config arguments for Masakari and initialize subsystems."""
    log.register_options(CONF)
    # Stick with oslo.log's stock defaults (which already include
    # suds=INFO); Masakari adds nothing extra here.
    log.set_defaults(default_log_levels=log.get_default_log_levels())
    rpc.set_defaults(control_exchange='masakari')
    config.set_middleware_defaults()
    CONF(argv[1:],
         project='masakari',
         version=version.version_string(),
         default_config_files=default_config_files)
    if init_rpc:
        rpc.init(CONF)
    if configure_db:
        sqlalchemy_api.configure(CONF)
def parse_args(argv, default_config_files=None, configure_db=True,
               init_rpc=True):
    """Parse CLI/config arguments for Nova and initialize subsystems."""
    log.register_options(CONF)
    # Start from oslo.log's stock defaults (which include suds=INFO) and
    # layer Nova's extra per-package levels on top.
    levels = log.get_default_log_levels() + _EXTRA_DEFAULT_LOG_LEVELS
    log.set_defaults(default_log_levels=levels)
    options.set_defaults(CONF,
                         connection=_DEFAULT_SQL_CONNECTION,
                         sqlite_db='nova.sqlite')
    rpc.set_defaults(control_exchange='nova')
    cache.configure(CONF)
    config.set_middleware_defaults()
    CONF(argv[1:],
         project='nova',
         version=version.version_string(),
         default_config_files=default_config_files)
    if init_rpc:
        rpc.init(CONF)
    if configure_db:
        sqlalchemy_api.configure(CONF)
def main():
    """Run the craton-api service on a stdlib WSGI reference server."""
    logging.register_options(CONF)
    CONF(sys.argv[1:], project='craton-api', default_config_files=[])
    logging.setup(CONF, 'craton-api')
    app = api.load_app()
    host = cfg.CONF.api.host
    port = cfg.CONF.api.port
    httpd = simple_server.make_server(host, port, app)
    LOG.info("Starting API server in PID: %s" % os.getpid())
    httpd.serve_forever()
def prepare_service(argv=None):
    """Prepare the Cyborg service: logging, config parsing, objects."""
    log.register_options(CONF)
    log.set_defaults(default_log_levels=CONF.default_log_levels)
    if not argv:
        argv = []
    config.parse_args(argv)
    log.setup(CONF, 'cyborg')
    objects.register_all()
def __init__(self, host):
    """Start the iotronic-wamp-agent: logging, DB registration, servers."""
    # Install a SIGINT handler so Ctrl-C triggers a clean shutdown.
    signal.signal(signal.SIGINT, self.stop_handler)
    logging.register_options(CONF)
    CONF(project='iotronic')
    logging.setup(CONF, "iotronic-wamp-agent")
    # to be removed asap
    self.host = host
    self.dbapi = dbapi.get_instance()
    # Register this agent; if the hostname is already known, retry with
    # update_existing=True to refresh the stale registration instead.
    try:
        wpa = self.dbapi.register_wampagent(
            {'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url})
    except exception.WampAgentAlreadyRegistered:
        LOG.warn(_LW("A wampagent with hostname %(hostname)s "
                     "was previously registered. Updating registration"),
                 {'hostname': self.host})
        wpa = self.dbapi.register_wampagent(
            {'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url},
            update_existing=True)
    self.wampagent = wpa
    self.wampagent.ragent = CONF.wamp.register_agent
    self.wampagent.save()
    # Publish this agent's hostname module-wide.
    global AGENT_HOST
    AGENT_HOST = self.host
    self.r = RPCServer()
    self.w = WampManager()
    self.r.start()
    self.w.start()
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    # With no category argument, print usage and the category list.
    if len(sys.argv) < 2:
        print(_("\nOpenStack meteos version: %(version)s\n") %
              {'version': version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)
    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project='meteos',
             version=version.version_string())
        log.setup(CONF, "meteos")
    except cfg.ConfigFilesNotFoundError:
        # If the config file exists but is unreadable, re-exec ourselves
        # via sudo as the file's owner; otherwise ask for root and exit.
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] +
                          sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))
        print(_('Please re-run meteos-manage as root.'))
        sys.exit(2)
    # Dispatch to the handler selected by the category/action CLI opts.
    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
def main():
    """Launch the meteos learning-API WSGI service."""
    log.register_options(CONF)
    CONF(sys.argv[1:],
         project='meteos',
         version=version.version_string())
    log.setup(CONF, "meteos")
    utils.monkey_patch()
    launcher = service.process_launcher()
    api_server = service.WSGIService('osapi_learning')
    # Fall back to a single worker if the service does not define any.
    worker_count = api_server.workers or 1
    launcher.launch_service(api_server, workers=worker_count)
    launcher.wait()
def __init__(self):
    """Initialize the collector, configuring oslo logging when available."""
    if osloConfig:
        logging.register_options(cfg.CONF)
        logging.set_defaults()
        cfg.CONF(args=[],
                 project="broadview_collector",
                 default_config_files=["/etc/broadviewcollector.conf"])
        logging.setup(cfg.CONF, 'broadview_collector')
    # Publisher and handler plugins are attached later.
    self._publishers = []
    self._handlers = []
def parse_args(argv, default_config_files=None):
    """Parse CLI/config arguments for masakarimonitors."""
    log.register_options(CONF)
    # oslo.log's stock defaults (which include suds=INFO) are sufficient.
    log.set_defaults(default_log_levels=log.get_default_log_levels())
    CONF(argv[1:],
         project='masakarimonitors',
         version=version.version_string(),
         default_config_files=default_config_files)
def prepare_service(argv=None):
    """Prepare the Zun service: logging and config parsing."""
    argv = [] if argv is None else argv
    log.register_options(CONF)
    config.parse_args(argv)
    config.set_config_defaults()
    log.setup(CONF, 'zun')
    # TODO(yuanying): Uncomment after objects are implemented
    # objects.register_all()
def __init__(self):
    """Attach the module logger and configure oslo logging for Armada."""
    self.logger = LOG
    logging.register_options(CONF)
    levels = CONF.default_log_levels
    logging.set_defaults(default_log_levels=levels)
    logging.setup(CONF, 'armada')
def parse_args(args=None, usage=None, default_config_files=None):
    """Register Qinling options, tune log levels and parse configuration."""
    CONF.register_cli_opts([launch_opt])
    for group, options in list_opts():
        CONF.register_opts(list(options), group)
    # Quiet the chatty libraries on top of oslo.log's stock defaults.
    extra_levels = [
        'eventlet.wsgi.server=WARN',
        'oslo_service.periodic_task=INFO',
        'oslo_service.loopingcall=INFO',
        'oslo_db=WARN',
        'oslo_concurrency.lockutils=WARN',
        'kubernetes.client.rest=%s' % CONF.kubernetes.log_devel,
        'keystoneclient=INFO',
        'requests.packages.urllib3.connectionpool=CRITICAL',
        'urllib3.connectionpool=CRITICAL',
    ]
    log.set_defaults(
        default_log_levels=log.get_default_log_levels() + extra_levels)
    log.register_options(CONF)
    CONF(
        args=args,
        project='qinling',
        version=version,
        usage=usage,
        default_config_files=default_config_files
    )
def parse_args():
    """Parse configuration arguments.

    Note: This method ensures that configuration will be loaded only
    once within a single python interpreter.
    """
    global _CONF_LOADED
    if _CONF_LOADED:
        LOG.debug('Configuration has been already loaded')
        return
    log.set_defaults()
    log.register_options(CONF)
    CONF(args=[],
         prog='events-api',
         project='monasca',
         version=version.version_str,
         description='RESTful API to collect events from cloud')
    log.setup(CONF,
              product_name='monasca-events-api',
              version=version.version_str)
    conf.register_opts(CONF)
    policy_opts.set_defaults(CONF)
    _CONF_LOADED = True
def pre_config():
    """Register logging options, pre-configure each module, register opts."""
    log.register_options(CONF)
    for mod in MODULES:
        mod.pre_config(CONF)
    register_opts()
def main():
    """Launch the bilean-engine service."""
    logging.register_options(cfg.CONF)
    cfg.CONF(project='bilean', prog='bilean-engine')
    logging.setup(cfg.CONF, 'bilean-engine')
    # NOTE(review): set_defaults() after setup() looks ineffective —
    # oslo.log applies defaults during setup; confirm intended ordering.
    logging.set_defaults()
    messaging.setup()
    from bilean.engine import service as engine
    srv = engine.EngineService(cfg.CONF.host, consts.ENGINE_TOPIC)
    launcher = service.launch(cfg.CONF, srv,
                              workers=cfg.CONF.num_engine_workers)
    launcher.wait()
def main():
    """Launch the bilean-notification service."""
    logging.register_options(cfg.CONF)
    cfg.CONF(project='bilean', prog='bilean-notification')
    logging.setup(cfg.CONF, 'bilean-notification')
    # NOTE(review): set_defaults() after setup() looks ineffective —
    # oslo.log applies defaults during setup; confirm intended ordering.
    logging.set_defaults()
    messaging.setup()
    from bilean.notification import notification
    srv = notification.NotificationService()
    launcher = service.launch(cfg.CONF, srv)
    launcher.wait()
def main():
    """Launch the bilean-scheduler service."""
    logging.register_options(cfg.CONF)
    cfg.CONF(project='bilean', prog='bilean-scheduler')
    logging.setup(cfg.CONF, 'bilean-scheduler')
    # NOTE(review): set_defaults() after setup() looks ineffective —
    # oslo.log applies defaults during setup; confirm intended ordering.
    logging.set_defaults()
    messaging.setup()
    from bilean.scheduler import service as scheduler
    srv = scheduler.SchedulerService(cfg.CONF.host, consts.SCHEDULER_TOPIC)
    launcher = service.launch(cfg.CONF, srv)
    launcher.wait()
def main():
    """Start the VSPC server after validating the serial log directory."""
    logging.register_options(CONF)
    CONF(sys.argv[1:], prog='vspc')
    logging.setup(CONF, "vspc")
    log_dir = CONF.serial_log_dir
    # The log directory option is mandatory; bail out early without it.
    if not log_dir:
        LOG.error("serial_log_dir is not specified")
        sys.exit(1)
    if not os.path.exists(log_dir):
        LOG.info("Creating log directory: %s", log_dir)
        os.makedirs(log_dir)
    server = VspcServer()
    server.start()
def prepare_service(argv=None):
    """Prepare the Mogan service: logging, config parsing, objects."""
    if argv is None:
        argv = []
    log.register_options(CONF)
    # Quiet eventlet and neutronclient on top of the configured defaults.
    levels = CONF.default_log_levels + ['eventlet.wsgi.server=INFO',
                                        'neutronclient=WARNING']
    log.set_defaults(default_log_levels=levels)
    config.parse_args(argv)
    log.setup(CONF, 'mogan')
    objects.register_all()
def make_config():
    """Create a fresh ConfigOpts with CLI and logging options registered."""
    config_opts = cfg.ConfigOpts()
    config_opts.register_cli_opts(cli_opts)
    log.register_options(config_opts)
    return config_opts
def init_log(project=PROJECT_NAME):
    """Register oslo.log options and set up logging for the project."""
    conf = cfg.CONF
    logging.register_options(conf)
    logging.setup(conf, project, version=VERSION)
def main():
    """Start a craton worker, post one example job and wait for it."""
    logging.register_options(CONF)
    CONF(sys.argv[1:], project='craton-worker', default_config_files=[])
    logging.setup(CONF, 'craton')
    persistence, board, conductor = worker.start(CONF)

    def stop(signum, _frame):
        # Graceful shutdown on SIGTERM.
        LOG.info('Caught signal %s, gracefully exiting', signum)
        conductor.stop()

    signal.signal(signal.SIGTERM, stop)

    # TODO(gus): eventually feeding in jobs will happen elsewhere and
    # main() will end here.
    #
    # conductor.wait()
    # sys.exit(0)

    def make_save_book(persistence, job_id, flow_plugin,
                       plugin_args=(), plugin_kwds=None):
        """Persist a logbook/flow-detail pair for the given flow plugin."""
        # NOTE: plugin_kwds previously defaulted to a mutable {} shared
        # across calls; None + fallback is the safe equivalent.
        if plugin_kwds is None:
            plugin_kwds = {}
        flow_id = book_id = job_id  # Do these need to be different?
        book = models.LogBook(book_id)
        detail = models.FlowDetail(flow_id, uuidutils.generate_uuid())
        book.add(detail)
        factory_args = [flow_plugin] + list(plugin_args)
        factory_kwargs = plugin_kwds
        engines.save_factory_details(detail, workflow_factory,
                                     factory_args, factory_kwargs)
        with contextlib.closing(persistence.get_connection()) as conn:
            conn.save_logbook(book)
        return book

    # Feed in example task
    job_uuid = uuidutils.generate_uuid()
    LOG.debug('Posting job %s', job_uuid)
    details = {
        'store': {
            'foo': 'bar',
        },
    }
    job = board.post(
        job_uuid,
        book=make_save_book(
            persistence, job_uuid, 'testflow',
            plugin_kwds=dict(task_delay=2)),
        details=details)
    # Run forever. TODO(gus): This is what we want to do in production
    # conductor.wait()
    job.wait()
    LOG.debug('Job finished: %s', job.state)
    conductor.stop()
def __init__(self, host):
    """Start the iotronic-conductor: logging, DB registration, RPC server."""
    logging.register_options(CONF)
    CONF(project='iotronic')
    logging.setup(CONF, "iotronic-conductor")
    # Install a SIGINT handler so Ctrl-C triggers a clean shutdown.
    signal.signal(signal.SIGINT, self.stop_handler)
    if not host:
        host = CONF.host
    self.host = host
    self.topic = MANAGER_TOPIC
    self.dbapi = dbapi.get_instance()
    # Register this conductor; if the hostname is already known, retry
    # with update_existing=True to refresh the stale registration.
    try:
        cdr = self.dbapi.register_conductor(
            {'hostname': self.host})
    except exception.ConductorAlreadyRegistered:
        LOG.warn(_LW("A conductor with hostname %(hostname)s "
                     "was previously registered. Updating registration"),
                 {'hostname': self.host})
        cdr = self.dbapi.register_conductor({'hostname': self.host},
                                            update_existing=True)
    self.conductor = cdr
    transport = oslo_messaging.get_transport(cfg.CONF)
    target = oslo_messaging.Target(topic=self.topic, server=self.host,
                                   version=self.RPC_API_VERSION)
    ragent = self.dbapi.get_registration_wampagent()
    LOG.info("Found registration agent: %s on %s",
             ragent.hostname, ragent.wsurl)
    endpoints = [
        endp.ConductorEndpoint(ragent),
    ]
    access_policy = dispatcher.DefaultRPCAccessPolicy
    self.server = oslo_messaging.get_rpc_server(
        transport, target, endpoints, executor='threading',
        access_policy=access_policy)
    self.server.start()
    # Keep the constructor alive so the RPC server keeps serving;
    # this loop never exits on its own.
    while True:
        time.sleep(1)
def main(ctx, debug, api, url, token):
    """
    Multi Helm Chart Deployment Manager

    Common actions from this point include:

    \b
    $ armada apply
    $ armada test
    $ armada tiller
    $ armada validate

    Environment:

    \b
    $TOKEN set auth token
    $HOST set armada service host endpoint

    This tool will communicate with deployed Tiller in your
    Kubernetes cluster.
    """
    # Ensure the click context carries a dict we can stash state in.
    if not ctx.obj:
        ctx.obj = {}
    if api:
        # API mode requires both the service URL and an auth token.
        if not url or not token:
            raise click.ClickException(
                'When api option is enable user needs to pass url')
        else:
            ctx.obj['api'] = api
        parsed_url = urlparse(url)
        ctx.obj['CLIENT'] = ArmadaClient(
            ArmadaSession(
                host=parsed_url.netloc,
                scheme=parsed_url.scheme,
                token=token)
        )
    log.register_options(CONF)
    if debug:
        CONF.debug = debug
    log.set_defaults(default_log_levels=CONF.default_log_levels)
    log.setup(CONF, 'armada')
def configure():
    """Register configuration."""
    CONF.register_cli_opts(build_os_options())
    CONF.register_opts(_COMMON)
    # Monitoring driver options.
    monitors_grp = cfg.OptGroup('monitoring',
                                title='Monitoring',
                                help='Monitoring Driver/plugin to be used to '
                                     'monitor compute nodes')
    CONF.register_group(monitors_grp)
    CONF.register_opts(_MONITORS, group='monitoring')
    # Fencing driver options.
    fencers_grp = cfg.OptGroup('fencer',
                               title='fencer Options',
                               help='fencer Driver/plugin to be used to '
                                    'fence compute nodes')
    CONF.register_group(fencers_grp)
    CONF.register_opts(_FENCER, group='fencer')
    # Evacuation Section :)
    evacuators_grp = cfg.OptGroup('evacuation',
                                  title='Evacuation Options',
                                  help='Evacuation Driver/plugin opts to be '
                                       'used to Evacuate compute nodes')
    CONF.register_group(evacuators_grp)
    CONF.register_opts(_EVACUATION, group='evacuation')
    # Notification Section :)
    notifiers_grp = cfg.OptGroup('notifiers',
                                 title='Notification Options',
                                 help='Notification Driver/plugin opts to be '
                                      'used to Notify admins/users if failure '
                                      'happens')
    CONF.register_group(notifiers_grp)
    CONF.register_opts(_NOTIFIERS, group='notifiers')
    # Keystone Auth
    keystone_grp = cfg.OptGroup('keystone_authtoken',
                                title='Keystone Auth Options',
                                help='OpenStack Credentials to call the nova '
                                     'APIs to evacuate ')
    CONF.register_group(keystone_grp)
    CONF.register_opts(_KEYSTONE_AUTH_TOKEN, group='keystone_authtoken')
    # Finally locate config files, register logging options and parse.
    default_conf = cfg.find_config_files('freezer', 'freezer-dr', '.conf')
    log.register_options(CONF)
    CONF(args=sys.argv[1:],
         project='freezer',
         default_config_files=default_conf,
         version=FREEZER_DR_VERSION)