Python logging module: critical() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use logging.critical().
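
Before the project examples, here is a minimal usage sketch of logging.critical(). The file name config.yml is hypothetical and only for illustration; the sketch shows the pattern most of the snippets below follow: log an unrecoverable failure at CRITICAL severity, then exit.

import logging

# Minimal, hypothetical sketch (not drawn from the projects below):
# configure the root logger, then emit a CRITICAL record for an
# unrecoverable condition before exiting.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

config_path = 'config.yml'  # hypothetical file name, for illustration only

try:
    with open(config_path) as fh:
        config_text = fh.read()
except OSError as exc:
    # CRITICAL is the highest built-in severity; it is typically reserved
    # for errors the program cannot recover from, usually just before exit.
    logging.critical('Cannot read %s: %s', config_path, exc)
    raise SystemExit(1)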

Project: PowerSpikeGG    Author: PowerSpikeGG
def __init__(self, lazy_connection=False):
        """Constructor. Initialize the client.

        Parameters:
            lazy_connection: avoid testing if the connection is working while
                initializing it.
        """
        if self.address is None:
            self.address = "mongodb://%s/" % FLAGS.rawdata_cache_server_address

        for _ in range(FLAGS.mongodb_connection_retry):
            self.client = self._connect(self.address, lazy_connection)
            if self.client is not None:
                break
        else:
            logging.critical("Unable to reach the MongoDB server.")
Project: manubot    Author: greenelab
def main():
    """
    Called as a console_scripts entry point in setup.py. This function defines
    the manubot command line script.
    """
    # Track if message gets logged with severity of error or greater
    # See https://stackoverflow.com/a/45446664/4651668
    error_handler = errorhandler.ErrorHandler()

    # Log to stderr
    logger = logging.getLogger()
    stream_handler = logging.StreamHandler(stream=sys.stderr)
    stream_handler.setFormatter(logging.Formatter('## {levelname}\n{message}', style='{'))
    logger.addHandler(stream_handler)

    args = parse_arguments()
    logger.setLevel(getattr(logging, args.log_level))

    prepare_manuscript(args)

    if error_handler.fired:
        logging.critical('Failure: exiting with code 1 due to logged errors')
        raise SystemExit(1)
Project: privcount    Author: privcount
def run(self):
        '''
        Called by twisted
        '''
        # load initial config
        self.refresh_config()
        if self.config is None:
            logging.critical("cannot start due to error in config file")
            return

        # refresh and check status every event_period seconds
        self.refresh_task = task.LoopingCall(self.refresh_loop)
        refresh_deferred = self.refresh_task.start(self.config['event_period'], now=False)
        refresh_deferred.addErrback(errorCallback)

        # setup server for receiving blinded counts from the DC nodes and key shares from the SK nodes
        listen_port = self.config['listen_port']
        key_path = self.config['key']
        cert_path = self.config['cert']
        ssl_context = ssl.DefaultOpenSSLContextFactory(key_path, cert_path)

        logging.info("Tally Server listening on port {}".format(listen_port))
        reactor.listenSSL(listen_port, self, ssl_context)
        reactor.run()
Project: cpsc415    Author: WheezePuppet
def execute_action(self, agent, action):
        '''Have the agent carry out this action. If a move in a compass
        direction, it may work, or may not, depending on whether there's an
        obstacle there. The next percept (2nd element of tuple) will tell the
        agent whether this happened.'''
        agent._bump = False
        if action in ['Left','Up','Right','Down']:
            agent._bump = self.try_to_move_in_dir(agent, action)
        elif action == 'Forward':
            agent._bump = self.try_to_move_in_dir(agent,
                agent._facing_direction)
        elif action == 'TurnLeft':
            directions = [ 'Up','Left','Down','Right','Up' ]
            agent._facing_direction = directions[
                directions.index(agent._facing_direction) + 1]
        elif action == 'TurnRight':
            directions = [ 'Up','Right','Down','Left','Up' ]
            agent._facing_direction = directions[
                directions.index(agent._facing_direction) + 1]
        elif action == 'NoOp':
            pass
        else:
            logging.critical("UNKNOWN action {}!!".format(action))
        self.notify_observers(agent)
Project: KodiDevKit    Author: phil65
def run(self, edit):
        self.label_ids = []
        self.labels = []
        region = self.view.sel()[0]
        if region.begin() == region.end():
            logging.critical("Please select the complete label")
            return False
        word = self.view.substr(region)
        for po_file in INFOS.get_po_files():
            for entry in po_file:
                if entry.msgid.lower() == word.lower() and entry.msgctxt not in self.label_ids:
                    self.label_ids.append(entry.msgctxt)
                    self.labels.append(["%s (%s)" % (entry.msgid, entry.msgctxt), entry.comment])
        self.labels.append("Create new label")
        sublime.active_window().show_quick_panel(items=self.labels,
                                                 on_select=lambda s: self.on_done(s, region),
                                                 selected_index=0)
Project: KodiDevKit    Author: phil65
def check_busy(func):
    """
    Decorator to check for self.is_busy
    Only one of the decorated functions may run simultaneously
    """

    @wraps(func)
    def decorator(self, *args, **kwargs):
        if self.is_busy:
            logging.critical("Already busy. Please wait.")
            return None
        self.is_busy = True
        try:
            func(self, *args, **kwargs)
        except Exception as e:
            logging.critical(e)
        self.is_busy = False
    return decorator
Project: Grating_Advanced_Simulation_Platform    Author: GratingLaboratories
def sel_handler(self):
        if self.event.key == K_q:
            if self.platform.focus in self.current_selection:
                pos = self.current_selection.index(self.platform.focus)
                index = (pos + 1) % len(self.current_selection)
                self.platform.focus = self.current_selection[index]
            elif len(self.current_selection) > 0:
                self.platform.focus = self.current_selection[0]
            else:
                self.platform.focus = None
        elif self.event.key == K_e:
            if self.current_selection == self.platform.operands:
                self.current_selection = self.platform.operators
            elif self.current_selection == self.platform.operators:
                self.current_selection = self.platform.operands
            else:
                logging.critical("Selection error!")
        else:
            pass
Project: nojs    Author: chrisdickinson
def InstallKVM():
  """Installs KVM packages."""
  rc = cmd_helper.RunCmd(['sudo', 'apt-get', 'install', 'kvm'])
  if rc:
    logging.critical('ERROR: Did not install KVM. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
  # TODO(navabi): Use modprobe kvm-amd on AMD processors.
  rc = cmd_helper.RunCmd(['sudo', 'modprobe', 'kvm-intel'])
  if rc:
    logging.critical('ERROR: Did not add KVM module to Linux Kernel. Make sure '
                     'hardware virtualization is enabled in BIOS.')
  # Now check to ensure KVM acceleration can be used.
  if not RunKvmOk():
    logging.critical('ERROR: Can not use KVM acceleration. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
Project: nojs    Author: chrisdickinson
def SuppressLogging(level=logging.ERROR):
  """Momentarilly suppress logging events from all loggers.

  TODO(jbudorick): This is not thread safe. Log events from other threads might
  also inadvertently disappear.

  Example:

    with logging_utils.SuppressLogging():
      # all but CRITICAL logging messages are suppressed
      logging.info('just doing some thing') # not shown
      logging.critical('something really bad happened') # still shown

  Args:
    level: logging events with this or lower levels are suppressed.
  """
  logging.disable(level)
  yield
  logging.disable(logging.NOTSET)
Project: sshchan    Author: einchan
def __init__(self, name='', desc='', config=None):
        assert config is not None, logging.critical(
            "Board.__init__: config is None")
        # Configuration object used to read / write config values.
        self.config = config
        self.c = Colors()

        self._name = name.lower()
        self._desc = desc
        self.thread = 0 # The thread that the user is viewing.
        self.path = os.path.join(self.config.root, "boards", self._name)
        self.index_path = os.path.join(self.path, "index")
        self.boardlist_path = self.config.boardlist_path

        if self.add_board():
            logging.info(
                'Board "/%s/ - %s" added succesfully.', self._name, self._desc)
Project: PiBunny    Author: tholum
def run(self, remoteName, remoteHost):
        self.connect(remoteName, remoteHost)
        self.__remoteOps = RemoteOperations(self.__smbConnection, self.__doKerberos, self.__kdcHost)

        try:
            self.__remoteOps.enableRegistry()
        except Exception, e:
            logging.debug(str(e))
            logging.warning('Cannot check RemoteRegistry status. Hoping it is started...')
            self.__remoteOps.connectWinReg()

        try:
            dce = self.__remoteOps.getRRP()

            if self.__action == 'QUERY':
                self.query(dce, self.__options.keyName)
            else:
                logging.error('Method %s not implemented yet!' % self.__action)
        except (Exception, KeyboardInterrupt), e:
            # import traceback
            # traceback.print_exc()
            logging.critical(str(e))
        finally:
            if self.__remoteOps:
                self.__remoteOps.finish()
Project: PiBunny    Author: tholum
def do_put(self, s):
        try:
            params = s.split(' ')
            if len(params) > 1:
                src_path = params[0]
                dst_path = params[1]
            elif len(params) == 1:
                src_path = params[0]
                dst_path = ''

            src_file = os.path.basename(src_path)
            fh = open(src_path, 'rb')
            dst_path = string.replace(dst_path, '/','\\')
            import ntpath
            pathname = ntpath.join(ntpath.join(self.__pwd,dst_path), src_file)
            drive, tail = ntpath.splitdrive(pathname)
            logging.info("Uploading %s to %s" % (src_file, pathname))
            self.__transferClient.putFile(drive[:-1]+'$', tail, fh.read)
            fh.close()
        except Exception, e:
            logging.critical(str(e))
            pass
Project: PiBunny    Author: tholum
def openPipe(self, s, tid, pipe, accessMask):
        pipeReady = False
        tries = 50
        while pipeReady is False and tries > 0:
            try:
                s.waitNamedPipe(tid,pipe)
                pipeReady = True
            except:
                tries -= 1
                time.sleep(2)
                pass

        if tries == 0:
            logging.critical('Pipe not ready, aborting')
            raise

        fid = s.openFile(tid,pipe,accessMask, creationOption = 0x40, fileAttributes = 0x80)

        return fid
Project: PiBunny    Author: tholum
def connectPipe(self):
        try:
            lock.acquire()
            global dialect
            self.server = SMBConnection('*SMBSERVER', self.transport.get_smb_connection().getRemoteHost(),
                                        sess_port=self.port, preferredDialect=dialect)
            user, passwd, domain, lm, nt, aesKey, TGT, TGS = self.credentials
            self.server.login(user, passwd, domain, lm, nt)
            lock.release()
            self.tid = self.server.connectTree('IPC$') 

            self.server.waitNamedPipe(self.tid, self.pipe)
            self.fid = self.server.openFile(self.tid,self.pipe,self.permissions, creationOption = 0x40, fileAttributes = 0x80)
            self.server.setTimeout(1000000)
        except Exception, e:
            logging.critical("Something wen't wrong connecting the pipes(%s), try again" % self.__class__)
Project: PiBunny    Author: tholum
def do_put(self, s):
        try:
            params = s.split(' ')
            if len(params) > 1:
                src_path = params[0]
                dst_path = params[1]
            elif len(params) == 1:
                src_path = params[0]
                dst_path = ''

            src_file = os.path.basename(src_path)
            fh = open(src_path, 'rb')
            dst_path = string.replace(dst_path, '/','\\')
            import ntpath
            pathname = ntpath.join(ntpath.join(self.__pwd,dst_path), src_file)
            drive, tail = ntpath.splitdrive(pathname)
            logging.info("Uploading %s to %s" % (src_file, pathname))
            self.__transferClient.putFile(drive[:-1]+'$', tail, fh.read)
            fh.close()
        except Exception, e:
            logging.critical(str(e))
            pass
Project: PiBunny    Author: tholum
def openPipe(self, s, tid, pipe, accessMask):
        pipeReady = False
        tries = 50
        while pipeReady is False and tries > 0:
            try:
                s.waitNamedPipe(tid,pipe)
                pipeReady = True
            except:
                tries -= 1
                time.sleep(2)
                pass

        if tries == 0:
            logging.critical('Pipe not ready, aborting')
            raise

        fid = s.openFile(tid,pipe,accessMask, creationOption = 0x40, fileAttributes = 0x80)

        return fid
Project: PiBunny    Author: tholum
def connectPipe(self):
        try:
            lock.acquire()
            global dialect
            self.server = SMBConnection('*SMBSERVER', self.transport.get_smb_connection().getRemoteHost(),
                                        sess_port=self.port, preferredDialect=dialect)
            user, passwd, domain, lm, nt, aesKey, TGT, TGS = self.credentials
            self.server.login(user, passwd, domain, lm, nt)
            lock.release()
            self.tid = self.server.connectTree('IPC$') 

            self.server.waitNamedPipe(self.tid, self.pipe)
            self.fid = self.server.openFile(self.tid,self.pipe,self.permissions, creationOption = 0x40, fileAttributes = 0x80)
            self.server.setTimeout(1000000)
        except:
            logging.critical("Something wen't wrong connecting the pipes(%s), try again" % self.__class__)
Project: PiBunny    Author: tholum
def openPipe(self, s, tid, pipe, accessMask):
        pipeReady = False
        tries = 50
        while pipeReady is False and tries > 0:
            try:
                s.waitNamedPipe(tid,pipe)
                pipeReady = True
            except:
                tries -= 1
                time.sleep(2)
                pass

        if tries == 0:
            logging.critical('Pipe not ready, aborting')
            raise

        fid = s.openFile(tid,pipe,accessMask, creationOption = 0x40, fileAttributes = 0x80)

        return fid
Project: PiBunny    Author: tholum
def do_get(self, src_path):
        try:
            if self.transferClient is None:
                self.connect_transferClient()

            import ntpath
            filename = ntpath.basename(src_path)
            fh = open(filename,'wb')
            logging.info("Downloading %s\%s" % (self.share, src_path))
            self.transferClient.getFile(self.share, src_path, fh.write)
            fh.close()
        except Exception, e:
            logging.critical(str(e))
            pass

        self.send_data('\r\n')
Project: trnnr    Author: NullHypothesis
def fetch_descriptors():
    """
    Fetch and return relay descriptors.
    """

    downloader = DescriptorDownloader(use_mirrors=True, timeout=20)
    query = downloader.get_server_descriptors(validate=False)

    descs = {}
    try:
        for desc in query.run():
            descs[desc.fingerprint] = desc
        log.info("Query took %0.2f seconds." % query.runtime)
    except Exception as exc:
        log.critical("Unable to retrieve server descriptors: %s" % exc)

    log.info("Downloaded %d descs." % len(descs))

    return descs
Project: nitro    Author: KVM-VMI
def build_syscall_name_map(self):
        # Its a bit difficult to know where the system call table ends, here we
        # do something kind of risky and read as long as translate_v2ksym
        # returns something that looks like a system call handler.
        mapping = {}
        for i in range(0, MAX_SYSTEM_CALL_COUNT):
            p_addr = self.sys_call_table_addr + (i * VOID_P_SIZE)
            try:
                addr = self.libvmi.read_addr_va(p_addr, 0)
                symbol = self.libvmi.translate_v2ksym(addr)
            except LibvmiError as error:
                logging.critical("Failed to build syscall name map")
                raise error
            else:
                if symbol is not None:
                    mapping[symbol] = i
                else:
                    break
        return mapping
Project: nitro    Author: KVM-VMI
def find_qemu_pid(vm_name):
    """
    Find QEMU's PID that is associated with a given virtual machine

    :param str vm_name: libvirt domain name
    :rtype: int
    """
    logging.info('Finding QEMU pid for domain %s', vm_name)
    libvirt_vm_pid_file = '/var/run/libvirt/qemu/{}.pid'.format(vm_name)
    try:
        with open(libvirt_vm_pid_file, 'r') as f:
            content = f.read()
            pid = int(content)
            return pid
    except IOError:
        for proc in psutil.process_iter():
            cmdline = proc.cmdline()[1:]
            if proc.name() == "qemu-system-x86_64" and \
               next((True for k, v in zip(cmdline, cmdline[1:]) if k == "-name" and vm_name in v), False):
                return proc.pid
        logging.critical('Cannot find QEMU')
        raise QEMUNotFoundError('Cannot find QEMU')
Project: tornado-compute    Author: michaelosthege
def post(self):
        try:
            url = self.get_argument("url", None)
            if (not url):   # take a default image
                url = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Serpent_roi_bandes_grises_01.JPG/300px-Serpent_roi_bandes_grises_01.JPG"
            call = dualprocessing.AsyncCall("predict", url=url)
            response = yield computationBroker.submit_call_async(call)
            if (response.Success):
                self.write(response.Result)
            else:
                raise response.Error
        except:
            def lastExceptionString():
                exc_type, ex, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                return "{0} in {1}:{2}".format(ex, fname, exc_tb.tb_lineno)
            exmsg = lastExceptionString()
            logging.critical(exmsg)
            self.write(exmsg)
Project: pyswd    Author: pavelrevak
def start(self):
        """Application start point"""
        try:
            self._swd = swd.Swd(swd_frequency=self._swd_frequency)
            self.print_device_info()
            self.process_actions()
        except swd.stlinkcom.StlinkComNotFound:
            logging.error("ST-Link not connected.")
        except PyswdException as err:
            logging.error("pyswd error: %s.", err)
        except swd.stlink.StlinkException as err:
            logging.critical("Stlink error: %s.", err)
        except swd.stlinkcom.StlinkComException as err:
            logging.critical("StlinkCom error: %s.", err)
        else:
            return 0
        return 1
Project: chromium-build    Author: discordapp
def InstallKVM():
  """Installs KVM packages."""
  rc = cmd_helper.RunCmd(['sudo', 'apt-get', 'install', 'kvm'])
  if rc:
    logging.critical('ERROR: Did not install KVM. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
  # TODO(navabi): Use modprobe kvm-amd on AMD processors.
  rc = cmd_helper.RunCmd(['sudo', 'modprobe', 'kvm-intel'])
  if rc:
    logging.critical('ERROR: Did not add KVM module to Linux Kernel. Make sure '
                     'hardware virtualization is enabled in BIOS.')
  # Now check to ensure KVM acceleration can be used.
  if not RunKvmOk():
    logging.critical('ERROR: Can not use KVM acceleration. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
Project: chromium-build    Author: discordapp
def SuppressLogging(level=logging.ERROR):
  """Momentarilly suppress logging events from all loggers.

  TODO(jbudorick): This is not thread safe. Log events from other threads might
  also inadvertently disappear.

  Example:

    with logging_utils.SuppressLogging():
      # all but CRITICAL logging messages are suppressed
      logging.info('just doing some thing') # not shown
      logging.critical('something really bad happened') # still shown

  Args:
    level: logging events with this or lower levels are suppressed.
  """
  logging.disable(level)
  yield
  logging.disable(logging.NOTSET)
Project: email-actions    Author: shantanugoel
def check_config(path):
  if not os.path.exists(path):
    logging.warning('Config file %s doesn\'t exist. Trying to create'
                    % (path))
    if create_config(path):
      logging.warning('Created new config file %s. Edit it first to '
                      'configure your settings correctly & then run '
                      'program again' % (path))
    return False
  else:
    global cfg
    try:
      with open(path, "r") as config_file:
        cfg = yaml.load(config_file)
    except IOError as e:
      logging.critical('Error while reading config_file %s: %s'
                       % (path, str(e)))
      logging.critical('Check the config file path and try again')
      return False
    if not cfg or 'filters' not in cfg or not cfg['filters']:
      logging.critical('Empty or malformed config_file %s' % (path))
      logging.critical('Check the config file path and try again')
      return False
  return True
Project: python-cookbook-3rd    Author: tuanavu
def main():
    # Configure the logging system
    logging.basicConfig(
        filename='app.log',
        level=logging.ERROR
    )

    # Variables (to make the calls that follow work)
    hostname = 'www.python.org'
    item = 'spam'
    filename = 'data.csv'
    mode = 'r'

    # Example logging calls (insert into your program)
    logging.critical('Host %s unknown', hostname)
    logging.error("Couldn't find %r", item)
    logging.warning('Feature is deprecated')
    logging.info('Opening file %r, mode=%r', filename, mode)
    logging.debug('Got here')
Project: python-cookbook-3rd    Author: tuanavu
def main():
    # Configure the logging system
    logging.config.fileConfig('logconfig.ini')

    # Variables (to make the calls that follow work)
    hostname = 'www.python.org'
    item = 'spam'
    filename = 'data.csv'
    mode = 'r'

    # Example logging calls (insert into your program)
    logging.critical('Host %s unknown', hostname)
    logging.error("Couldn't find %r", item)
    logging.warning('Feature is deprecated')
    logging.info('Opening file %r, mode=%r', filename, mode)
    logging.debug('Got here')
Project: PMUtoCSVwriter    Author: PaulBrogan
def __init__(self):

        self.AsciiOP = False      # change to False for unicode output

        InvertSymmetricalComponents = True 
        self.SymAng = math.pi * (2. / 3.) 
        if InvertSymmetricalComponents == True:
            self.SymAng = -self.SymAng

        logging.basicConfig(filename = 'PMU2CSV_logfile.log', level = logging.INFO, filemode='w', format='%(asctime)s %(message)s')
        logging.critical('-------------------------------------')
        logging.critical('Script started at ' + time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) + 'GMT')

        threading.Thread.__init__(self)

        self.PMUip = "192.168.0.10"
        self.PMUport = 4712
        self.PMUnumber = 20

        self.CSVlabel = "PMUtoCSV_scriptDefault_"
        self.WriteEvery = 5
        self.CloseFileAfter = 3600

        self.timeSF = 13
        self.dataSF = 7
Project: PMUtoCSVwriter    Author: PaulBrogan
def OpenTCPcomms(self):
        self.serversocket = self.sock
        Connected = False
        Attempts = 0
        while Connected == False and Attempts <15:
            #self.serversocket.connect((self.PMUip, self.PMUport))
            try:
                #print(self.PMUip, self.PMUport)
                self.serversocket.connect((self.PMUip, self.PMUport))
                Connected = True
                logging.critical('Connected to PMU')

            except:
                e = sys.exc_info()[0:2]
                logging.critical('TCP connection failed with ' + str(e) + ' Attempt ' + str(Attempts)) 
                time.sleep(0.25)
                Attempts += 1
                self.CloseTCPcomms()
Project: PMUtoCSVwriter    Author: PaulBrogan
def Get_CF2_and_initialise(self):
        try:
            self.CloseTCPcomms()
        except:
            pass

        self.OpenTCPcomms()
        self.serversocket.send(self.SendCFG2())

        CF2 = b''
        X = self.serversocket.recv(1024)
        while True:
            CF2 += X
            try:
                X = self.serversocket.recv(1024)
                if X == b'':
                    break
            except:
                break

        self.C37dataEnter(CF2)
        logging.critical('Connected, Command Frame 2 received and processed')
Project: attention-sum-reader    Author: cairoHy
def ensemble_test(test_data, models):
    data = [[] for _ in d_bucket]
    for test_document, test_question, test_answer, test_candidate in zip(*test_data):
        if len(test_document) <= d_bucket[0][0]:
            data[0].append((test_document, test_question, test_answer, test_candidate))
            continue
        if len(test_document) >= d_bucket[-1][-1]:
            data[len(models) - 1].append((test_document, test_question, test_answer, test_candidate))
            continue
        for bucket_id, (d_min, d_max) in enumerate(d_bucket):
            if d_min < len(test_document) < d_max:
                data[bucket_id].append((test_document, test_question, test_answer, test_candidate))
                continue

    acc, num = 0, 0
    for i in range(len(models)):
        num += len(data[i])
        logging.info("Start testing.\nTesting in {} samples.".format(len(data[i])))
        acc_i, _ = models[i].test(zip(*data[i]), batch_size=1)
        acc += acc_i
    logging.critical("Ensemble test done.\nAccuracy is {}".format(acc / num))
Project: freeipa-pr-ci    Author: freeipa
def _before(self):
        super(RunPytest, self)._before()

        # Prepare test config files
        try:
            create_file_from_template(
                constants.ANSIBLE_VARS_TEMPLATE.format(
                    action_name=self.action_name),
                os.path.join(self.data_dir, 'vars.yml'),
                dict(repofile_url=urllib.parse.urljoin(
                        self.build_url, 'rpms/freeipa-prci.repo'),
                     update_packages=self.update_packages))
        except (OSError, IOError) as exc:
            msg = "Failed to prepare test config files"
            logging.debug(exc, exc_info=True)
            logging.critical(msg)
            raise exc
Project: pcbot    Author: pckv
def setowner(message: discord.Message):
    """ Set the bot owner. Only works in private messages. """
    if not message.channel.is_private:
        return

    assert not plugins.owner_cfg.data, "An owner is already set."

    owner_code = str(random.randint(100, 999))
    logging.critical("Owner code for assignment: {}".format(owner_code))

    await client.say(message,
                     "A code has been printed in the console for you to repeat within 60 seconds.")
    user_code = await client.wait_for_message(timeout=60, channel=message.channel, content=owner_code)

    assert user_code, "You failed to send the desired code."

    if user_code:
        await client.say(message, "You have been assigned bot owner.")
        plugins.owner_cfg.data = message.author.id
        plugins.owner_cfg.save()
Project: millilauncher    Author: fhfuih
def __init__(self, mc_dir=default_minecraft_directory, java_dir=default_java_directory):
        self.minecraft_directory = mc_dir
        self.java_directory = java_dir
        if not mc_dir or not os.path.exists(mc_dir):
            logging.critical('Invalid /.minecraft/ directory.')
            raise FileNotFoundError('Invalid /.minecraft/ directory {0}'.format(mc_dir))
        if not java_dir or not os.path.exists(java_dir):
            logging.critical('Invalid javaw.exe directory.')
            raise FileNotFoundError('Invalid javaw.exe directory {0}'.format(java_dir))
        self.libraries_directory = os.path.join(self.minecraft_directory, 'libraries')
        self.assets_directory = os.path.join(self.minecraft_directory, 'assets')
        self.version_directory = None
        self.natives_directory = None
        self.libraries = None
        os.chdir(self.minecraft_directory)
        self.versions = MCVersionsList(mc_dir)
Project: onedrived-dev    Author: xybu
def get_repo_table(ctx):
    """
    :param onedrived.od_context.UserContext ctx:
    :return dict[str, [onedrived.od_repo.OneDriveLocalRepository]]:
    """
    all_accounts = {}
    all_account_ids = ctx.all_accounts()
    if len(all_account_ids) == 0:
        logging.critical('onedrived is not linked with any OneDrive account. Please configure onedrived first.')
        sys.exit(1)
    for account_id in all_account_ids:
        authenticator, drives = get_authenticator_and_drives(ctx, account_id)
        local_repos = [od_repo.OneDriveLocalRepository(ctx, authenticator, d, ctx.get_drive(d.id))
                       for d in drives if d.id in ctx.config['drives']]
        if len(local_repos) > 0:
            all_accounts[account_id] = local_repos
        else:
            profile = ctx.get_account(account_id)
            logging.info('No Drive associated with account "%s" (%s).', profile.account_email, account_id)
    return all_accounts
Project: VocaBot    Author: bomjacob
def main():
    token = os.getenv('VOCABOT_TOKEN')
    if not token:
        logging.critical('NO TOKEN FOUND!')
        sys.exit()

    updater = Updater(token)

    # Now we know bot name, set the user-agent of vocadb api session
    voca_db.set_name(updater.bot.name)

    dp = updater.dispatcher

    # Add main handlers
    dp = add_update_handlers(dp)

    # Also add our "log everything" error handler
    dp.add_error_handler(error)

    # Start fetching updates, we might wanna use webhooks instead at some points.
    updater.start_polling()

    # Loop till we quit
    updater.idle()
Project: TtcnComplete    Author: HuiMi24
def is_valid(self):
        """Check settings validity. If any of the settings is None the settings
        are not valid.

        Returns:
            bool: validity of settings
        """
        if self.sublime_settings is None:
            logging.critical(" no sublime_settings found")
            return False
        if self.debug_mode is None:
            logging.critical(" no debug_mode found")
            return False
        if self.triggers is None:
            logging.critical(" no triggers found")
            return False
        return True
Project: erine.email    Author: mdavranche
def ee2f_getReplyAddress(fromAddress, toAddress):
  execQuery("SELECT `disposableMailAddress` FROM `replyAddress` WHERE `mailAddress` = %s", getAddress(toAddress))
  replyAddress = dbCursor.fetchone()
  if replyAddress:
    execQuery("SELECT `user`.`mailAddress` FROM `user` JOIN `disposableMailAddress` ON `user`.`ID` = `disposableMailAddress`.`userID` WHERE `disposableMailAddress`.`mailAddress` = %s", replyAddress[0])
    allowedEmail = dbCursor.fetchone()
    if not allowedEmail:
      logging.critical("Can not check if " + getAddress(fromAddress) + " is allowed to send an email as " + replyAddress[0] + ". Assuming yes.")
    else:
      if allowedEmail[0] != getAddress(fromAddress):
        raise BounceException('"{}" is not allowed to send an email as "{}"').format(
          getAddress(fromAddress), replyAddress[0]
        )
    label = getLabel(fromAddress)
    if label:
      return label + " <" + replyAddress[0] + ">"
    else:
      return replyAddress[0]
  else:
    raise BounceException('Invalid email address: "{}"'.format(toAddress))

# A foreign address is writing to an erine.email user (f2ee as Foreign To Erine.Email)
# Forge or retrieve reply email address
# Bounce email on invalid toAddress
Project: gn_build    Author: realcome
def InstallKVM():
  """Installs KVM packages."""
  rc = cmd_helper.RunCmd(['sudo', 'apt-get', 'install', 'kvm'])
  if rc:
    logging.critical('ERROR: Did not install KVM. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
  # TODO(navabi): Use modprobe kvm-amd on AMD processors.
  rc = cmd_helper.RunCmd(['sudo', 'modprobe', 'kvm-intel'])
  if rc:
    logging.critical('ERROR: Did not add KVM module to Linux Kernel. Make sure '
                     'hardware virtualization is enabled in BIOS.')
  # Now check to ensure KVM acceleration can be used.
  if not RunKvmOk():
    logging.critical('ERROR: Can not use KVM acceleration. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
Project: gn_build    Author: realcome
def SuppressLogging(level=logging.ERROR):
  """Momentarilly suppress logging events from all loggers.

  TODO(jbudorick): This is not thread safe. Log events from other threads might
  also inadvertently disappear.

  Example:

    with logging_utils.SuppressLogging():
      # all but CRITICAL logging messages are suppressed
      logging.info('just doing some thing') # not shown
      logging.critical('something really bad happened') # still shown

  Args:
    level: logging events with this or lower levels are suppressed.
  """
  logging.disable(level)
  yield
  logging.disable(logging.NOTSET)
Project: text-classification-cnn-rnn    Author: fudannlp16
def pad_sentences(sentences, padding_word="<PAD/>", forced_sequence_length=None):
    """Pad setences during training or prediction"""
    if forced_sequence_length is None: # Train
        sequence_length = max(len(x) for x in sentences)
    else: # Prediction
        logging.critical('This is prediction, reading the trained sequence length')
        sequence_length = forced_sequence_length
    logging.critical('The maximum length is {}'.format(sequence_length))

    padded_sentences = []
    for i in range(len(sentences)):
        sentence = sentences[i]
        num_padding = sequence_length - len(sentence)

        if num_padding < 0: # Prediction: cut off the sentence if it is longer than the sequence length
            logging.info('This sentence has to be cut off because it is longer than trained sequence length')
            padded_sentence = sentence[0:sequence_length]
        else:
            padded_sentence = sentence + [padding_word] * num_padding
        padded_sentences.append(padded_sentence)
    return padded_sentences
Project: spyglass    Author: Crypta-Eve
def changeTheme(self, newTheme=None):
        if newTheme is not None:
            for action in self.themeGroup.actions():
                if action.theme == newTheme:
                    action.setChecked(True)
        action = self.themeGroup.checkedAction()
        styles = Styles()
        styles.setStyle(action.theme)
        theme = styles.getStyle()
        self.setStyleSheet(theme)
        logging.critical("Setting new theme: {}".format(action.theme))
        self.cache.putIntoCache("theme", action.theme, 60 * 60 * 24 * 365)
        self.setupMap()
        self.clearIntelChat()
        if self.autoRescanIntelEnabled:
            self.rescanIntel()
Project: workforce-scripts    Author: Esri
def get_worker_id(org_url, token, projectId, worker):
    """
    Get the logged in users dispatcher id
    :param org_url: The organizational url to use
    :param token: The token to authenticate with
    :param projectId: The projectId to use
    :param worker: The name of the worker to get the id of
    :return: The OBJECTID of the specified dispatcher
    """
    logger = logging.getLogger()
    logger.debug("Getting dispatcher id for: {}...".format(worker))
    worker_fl_url = workforcehelpers.get_workers_feature_layer_url(org_url, token, projectId)
    workers = workforcehelpers.query_feature_layer(worker_fl_url, token, where="userId='{}'".format(worker))
    if workers["features"]:
        return workers["features"][0]["attributes"]["OBJECTID"]
    else:
        logger.critical("{} is not a worker".format(worker))
        return None
Project: workforce-scripts    Author: Esri
def get_worker_id(shh, projectId, worker):
    """
    Get the logged in users dispatcher id
    :param shh: The ArcREST security handler helper
    :param projectId: The projectId to use
    :param worker: The name of the worker to get the id of
    :return: The OBJECTID of the specified dispatcher
    """
    logger = logging.getLogger()
    logger.debug("Getting dispatcher id for: {}...".format(worker))
    worker_fl = workforcehelpers.get_workers_feature_layer(shh, projectId)
    workers = worker_fl.query(where="userId='{}'".format(worker))
    if workers.features:
        return workers.features[0].asDictionary["attributes"]["OBJECTID"]
    else:
        logger.critical("{} is not a worker".format(worker))
        return None
Project: wptagent    Author: WPO-Foundation
def get_decimate_filter():
    decimate = None
    try:
        filters = subprocess.check_output(
            ['ffmpeg', '-filters'], stderr=subprocess.STDOUT)
        lines = filters.split("\n")
        match = re.compile(
            r'(?P<filter>[\w]*decimate).*V->V.*Remove near-duplicate frames')
        for line in lines:
            m = re.search(match, line)
            if m is not None:
                decimate = m.groupdict().get('filter')
                break
    except BaseException:
        logging.critical('Error checking ffmpeg filters for decimate')
        decimate = None
    return decimate
Project: wptagent    Author: WPO-Foundation
def connect(self, task):
        """Connect to the dev tools interface"""
        ret = False
        from internal.devtools import DevTools
        self.devtools = DevTools(self.options, self.job, task, self.use_devtools_video)
        if task['running_lighthouse']:
            ret = self.devtools.wait_for_available(self.CONNECT_TIME_LIMIT)
        else:
            if self.devtools.connect(self.CONNECT_TIME_LIMIT):
                logging.debug("Devtools connected")
                ret = True
            else:
                task['error'] = "Error connecting to dev tools interface"
                logging.critical(task['error'])
                self.devtools = None
        return ret
Project: peerme    Author: cooperlees
def get_pool(self):
        HOST = self.global_config['peeringdb_mysql']['host']
        USER = self.global_config['peeringdb_mysql']['user']
        PASS = self.global_config['peeringdb_mysql']['pass']
        PORT = int(self.global_config['peeringdb_mysql']['port'])
        DATABASE = self.global_config['peeringdb_mysql']['database']
        try:
            self.pool = await aiomysql.create_pool(
                host=HOST,
                port=PORT,
                user=USER,
                password=PASS,
                db=DATABASE,
                loop=self.loop,
            )
        except pymysql_err.OperationalError as pmye:
            logging.critical("DB Connect Error: {}".format(pmye))
            sys.exit(1)

        logging.debug("Obtained DB connection pool to {}".format(HOST))
Project: icing    Author: slipguru
def distributions(self, records=None):
        logging.info("Analysing %s ...", self.database)
        try:
            if records is not None and isinstance(records, pd.DataFrame):
                max_mut = np.max(records['MUT'])
                self.n_samples = records.shape[0]
            else:
                # load from file
                max_mut, self.n_samples = io.get_max_mut(self.database)

            lin = np.linspace(0, max_mut, min(self.n_samples / 15., 12))
            sets = [(0, 0)] + zip(lin[:-1], lin[1:])
            if len(sets) == 1:
                # no correction needs to be applied
                return None
            out_muts = [self.intra_donor_distance(
                records, i, j) for i, j in zip(sets, sets)]
        except StandardError as msg:
            logging.critical(msg)
            out_muts = []

        my_dict = dict()
        for f, m in out_muts:
            my_dict.setdefault(m, []).append(f)
        return my_dict