Python logging module: fatal() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how logging.fatal() is used. Note that several of the source projects target Python 2: the except Exception, e: syntax that appears in many snippets is invalid under Python 3, which requires except Exception as e.
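
For orientation: logging.fatal() is an alias for logging.critical(), logging at level FATAL (equal to CRITICAL, numeric value 50). Despite its name it does not terminate the process, which is why many of the examples below pair it with sys.exit() or a raised exception. A minimal self-contained sketch (the config path and message are illustrative):

import logging
import sys

logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(message)s")

try:
    f = open("/etc/myapp.conf")  # hypothetical config path
except IOError as e:
    # fatal() only records the message at CRITICAL level;
    # terminating the process is left to the caller.
    logging.fatal("Cannot read config! Error: %s" % e)
    sys.exit(1)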

Project: mongodb_consistent_backup    Author: Percona-Lab
def __init__(self, manager, config, timer, base_dir, backup_dir, tailed_oplogs, backup_oplogs):
        super(Resolver, self).__init__(self.__class__.__name__, manager, config, timer, base_dir, backup_dir)
        self.tailed_oplogs = tailed_oplogs
        self.backup_oplogs = backup_oplogs

        self.compression_supported = ['none', 'gzip']
        self.resolver_summary      = {}
        self.resolver_state        = {}

        self.running   = False
        self.stopped   = False
        self.completed = False
        self._pool     = None
        self._pooled   = []
        self._results  = {}
        try:
            self._pool = Pool(processes=self.threads(None, 2))
        except Exception, e:
            logging.fatal("Could not start oplog resolver pool! Error: %s" % e)
            raise Error(e)
Project: mongodb_consistent_backup    Author: Percona-Lab
def __init__(self, config, timer, db):
        self.config             = config
        self.timer              = timer
        self.db                 = db
        self.balancer_wait_secs = self.config.sharding.balancer.wait_secs
        self.balancer_sleep     = self.config.sharding.balancer.ping_secs

        self.timer_name            = self.__class__.__name__
        self.config_server         = None
        self.config_db             = None
        self.mongos_db             = None
        self._balancer_state_start = None
        self.restored              = False

        # Get a DB connection
        try:
            if isinstance(self.db, DB):
                self.connection = self.db.connection()
                if not self.db.is_mongos() and not self.db.is_configsvr():
                    raise DBOperationError('MongoDB connection is not to a mongos or configsvr!')
            else:
                raise Error("'db' field is not an instance of class: 'DB'!")
        except Exception, e:
            logging.fatal("Could not get DB connection! Error: %s" % e)
            raise DBOperationError(e)
Project: mongodb_consistent_backup    Author: Percona-Lab
def set_balancer(self, value):
        try:
            if self.is_gte_34():
                # 3.4+ configsvrs don't have balancerStart/Stop, even though they're the balancer!
                # Use self.get_mongos() to get a mongos connection for now
                if value is True:
                    self.get_mongos().admin_command("balancerStart")
                else:
                    self.get_mongos().admin_command("balancerStop")
            else:
                if value is True:
                    set_value = False
                elif value is False:
                    set_value = True
                else:
                    set_value = True
                config = self.connection['config']
                config['settings'].update_one({'_id': 'balancer'}, {'$set': {'stopped': set_value}})
        except Exception, e:
            logging.fatal("Failed to set balancer state! Error: %s" % e)
            raise DBOperationError(e)
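
The MongoDB 3.4+ branch above goes through the project's own mongos wrapper. Against a bare pymongo connection to MongoDB 3.4 or later, the equivalent admin commands would look roughly like this (host name is illustrative):

from pymongo import MongoClient

client = MongoClient("mongodb://mongos.example.net:27017")  # hypothetical mongos address
client.admin.command("balancerStop")   # stop the balancer before a backup
# ... perform the backup ...
client.admin.command("balancerStart")  # restore the balancer afterwards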
Project: mongodb_consistent_backup    Author: Percona-Lab
def stop_balancer(self):
        logging.info("Stopping the balancer and waiting a max of %i sec" % self.balancer_wait_secs)
        wait_cnt = 0
        self.timer.start(self.timer_name)
        self.set_balancer(False)
        while wait_cnt < self.balancer_wait_secs:
            if self.check_balancer_running():
                wait_cnt += self.balancer_sleep
                logging.info("Balancer is still running, sleeping for %i sec(s)" % self.balancer_sleep)
                sleep(self.balancer_sleep)
            else:
                self.timer.stop(self.timer_name)
                logging.info("Balancer stopped after %.2f seconds" % self.timer.duration(self.timer_name))
                return
        logging.fatal("Could not stop balancer %s!" % self.db.uri)
        raise DBOperationError("Could not stop balancer %s" % self.db.uri)
Project: mongodb_consistent_backup    Author: Percona-Lab
def get_config_server(self, force=False):
        if force or not self.config_server:
            configdb_uri = self.get_configdb_hosts()
            try:
                logging.info("Found sharding config server: %s" % configdb_uri)
                if self.db.uri.hosts() == configdb_uri.hosts():
                    self.config_db = self.db
                    logging.debug("Re-using seed connection to config server(s)")
                else:
                    self.config_db = DB(configdb_uri, self.config, True)
                if self.config_db.is_replset():
                    self.config_server = Replset(self.config, self.config_db)
                else:
                    self.config_server = {'host': configdb_uri.hosts()}
                    self.config_db.close()
            except Exception, e:
                logging.fatal("Unable to locate config servers using %s: %s!" % (self.db.uri, e))
                raise OperationError(e)
        return self.config_server
Project: omSipCreator    Author: KBNLresearch
def readChecksums(fileIn):
    # Read checksum file, return contents as nested list
    # Also strip away any file paths if they exist (return names only)

    try:
        data = []
        f = open(fileIn,"r", encoding="utf-8")
        for row in f:
            rowSplit = row.split(' ', 1)
            # Second col contains file name. Strip away any path components if they are present
            fileName = rowSplit[1].strip() # Raises IndexError if entry only 1 col (malformed checksum file)!
            rowSplit[1] = os.path.basename(fileName) 
            data.append(rowSplit)    
        f.close()
        return(data)
    except IOError:
        logging.fatal("cannot read '" + fileIn + "'")
        config.errors += 1
        errorExit(config.errors, config.warnings)
Project: tagberry    Author: csailer
def _logWriter(self,level,message,exception=None):

        self._logger.setLevel(level)
        self._fh.setLevel(level)
        self._ch.setLevel(level)

        # Default to an empty string so the message formatting below
        # also works when no exception is passed.
        exFormatted = ""
        if(exception!=None):
            exFormatted = self._formatException(exception)

        msg = "%s%s" % (message,exFormatted)

        if(level==logging.DEBUG):
           logging.debug(msg)
        elif(level==logging.INFO):
           logging.info(msg)
        elif(level==logging.WARN):
           logging.warn(msg)
        elif(level==logging.FATAL):
           logging.fatal(msg)
        elif(level==logging.ERROR):
           logging.error(msg)
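
The if/elif dispatch above re-implements something the standard library already provides: logging.log() takes the numeric level as its first argument and routes the message accordingly, so the whole chain reduces to a single call:

# Equivalent to the level dispatch above, DEBUG through FATAL/CRITICAL:
logging.log(level, msg)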
Project: CPU-Manager-for-Kubernetes    Author: Intel-Corp
def discover(conf_dir):

    version = k8s.get_kubelet_version(None)
    if version == "v1.8.0":
        logging.fatal("K8s 1.8.0 is not supported. Update K8s to "
                      "version >=1.8.1 or rollback to previous versions")

    if version >= "v1.8.1":
        # Patch the node with the appropriate CMK ER.
        logging.debug("Patching the node with the appropriate CMK ER.")
        add_node_er(conf_dir)
    else:
        # Patch the node with the appropriate CMK OIR.
        logging.debug("Patching the node with the appropriate CMK OIR.")
        add_node_oir(conf_dir)

    # Add appropriate CMK label to the node.
    logging.debug("Adding appropriate CMK label to the node.")
    add_node_label()
    # Add appropriate CMK taint to the node.
    logging.debug("Adding appropriate CMK taint to the node.")
    add_node_taint()


# add_node_oir patches the node with the appropriate CMK OIR.
Project: CommunityCellularManager    Author: facebookincubator
def send_msg(self, msg_type, ies):
        """
        Encodes and sends the message to the IPA layer.
        """
        # Calc the maximum length possible for the message, and allocate memory
        buf_size = _GSUP.get_max_bytes(ies)
        (buf, offset) = self._ipa_writer.get_write_buf(buf_size)

        try:
            msg_len = _GSUP.encode(buf, offset, msg_type, ies)
        except GSUPCodecError as err:
            # Encoding should always succeed
            logging.fatal(
                "Encoding failed with err: %s, for msg: %s, ies: %s",
                err, msg_type, ies)
            return

        # Reset the length in the IPA header based on actual msg size
        self._ipa_writer.reset_length(buf, msg_len - offset)

        # Write the encoded msg
        self._ipa_writer.write(buf[:msg_len])
Project: idascripts    Author: ctfhacker
def __rebase_globals(old, new, size, iterable):
    node = internal.comment.tagging.node()
    failure, total = [], list(iterable)
    for i, (ea, count) in enumerate(total):
        # remove the old address
        ok = internal.netnode.alt.remove(node, ea)
        if not ok:
            logging.fatal("{:s}.rebase : Failure trying to remove refcount for {:x} : {!r}".format(__name__, ea, count))

        # now add the new address
        res = ea - old + new
        ok = internal.netnode.alt.set(node, res, count)
        if not ok:
            logging.fatal("{:s}.rebase : Failure trying to store refcount from {:x} to {:x} : {!r}".format(__name__, ea, res, count))

            failure.append((ea, res, count))
        yield i, ea
    return

# address naming
Project: idascripts    Author: ctfhacker
def pop(self):
        '''Pop a result off of the result queue.'''
        cls = self.__class__
        if not self.thread.is_alive():
            logging.fatal("{:s}.pop : Refusing to wait for a result when execution queue has already terminated. : {!r}".format('.'.join(('internal',__name__,cls.__name__)), self))
            raise Queue.Empty

        logging.debug("{:s}.pop : Popping result off of execution queue. : {!r}".format('.'.join(('internal',__name__,cls.__name__)), self))
        try:
            _, res, err = self.result.get(block=0)
            if err != (None, None, None):
                t, e, tb = err
                raise t, e, tb
        finally:
            self.result.task_done()
        return res
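
The three-argument raise t, e, tb above is Python 2 syntax for re-raising an exception with its original traceback. Under Python 3, assuming err still holds a (type, value, traceback) triple, a sketch of the same step is:

t, e, tb = err
# Python 3: re-raise the exception instance, keeping the stored traceback
raise e.with_traceback(tb)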
Project: tor_core    Author: GrafeasGroup
def redis(self):
        """
        Lazy-loaded redis connection
        """
        from redis import StrictRedis
        import redis.exceptions

        try:
            url = os.environ.get('REDIS_CONNECTION_URL',
                                 'redis://localhost:6379/0')
            conn = StrictRedis.from_url(url)
            conn.ping()
        except redis.exceptions.ConnectionError:
            logging.fatal("Redis server is not running")
            raise
        return conn
Project: tor_core    Author: GrafeasGroup
def configure_redis():
    """
    Creates a connection to the local Redis server, then returns the active
    connection.

    :return: object: the active Redis object.
    """
    try:
        url = os.getenv('REDIS_CONNECTION_URL', 'redis://localhost:6379/0')
        redis_server = redis.StrictRedis.from_url(url)
        redis_server.ping()
    except redis.exceptions.ConnectionError:
        logging.fatal("Redis server is not running! Exiting!")
        sys.exit(1)

    return redis_server
Project: sgnmt    Author: ucam-smt
def parse_args(parser):
    """http://codereview.stackexchange.com/questions/79008/parse-a-config-file-
    and-add-to-command-line-arguments-using-argparse-in-python """
    args = parser.parse_args()
    if args.config_file:
        if not YAML_AVAILABLE:
            logging.fatal("Install PyYAML in order to use config files.")
            return args
        data = yaml.load(args.config_file)
        delattr(args, 'config_file')
        arg_dict = args.__dict__
        for key, value in data.items():
            if isinstance(value, list):
                for v in value:
                    arg_dict[key].append(v)
            else:
                arg_dict[key] = value
    return args
Project: sgnmt    Author: ucam-smt
def parse_param_string(param):
    """Parses a parameter string such as 'param1=x,param2=y'. Loads 
    config files if specified in the string. If ``param`` points to a
    file, load this file with YAML.
    """
    if not param:
        return {}
    if os.path.isfile(param):
        param = "config_file=%s" % param
    config = {}
    for pair in param.strip().split(","):
        (k,v) = pair.split("=", 1)
        if k == 'config_file':
            if not YAML_AVAILABLE:
                logging.fatal("Install PyYAML in order to use config files.")
            else:
                with open(v) as f:
                    data = yaml.load(f)
                    for config_file_key, config_file_value in data.items():
                        config[config_file_key] = config_file_value
        else:
            config[k] = v
    return config
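
Both sgnmt helpers above pass user-supplied files to yaml.load(). In current PyYAML, calling yaml.load() without an explicit Loader is deprecated because it can construct arbitrary Python objects from tagged input; for plain configuration data, yaml.safe_load() is a drop-in replacement (file name here is illustrative):

import yaml

with open("config.yaml") as f:
    data = yaml.safe_load(f)  # parses plain YAML, rejects arbitrary object tags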
Project: sgnmt    Author: ucam-smt
def set_up_predictor(self, nmt_model_path):
        """Initializes the predictor with the given NMT model. Code 
        following ``blocks.machine_translation.main``. 
        """
        self.src_vocab_size = self.config['src_vocab_size']
        self.trgt_vocab_size = self.config['trg_vocab_size']
        self.nmt_model = NMTModel(self.config)
        self.nmt_model.set_up()
        loader = LoadNMTUtils(nmt_model_path,
                              self.config['saveto'],
                              self.nmt_model.search_model)
        loader.load_weights()

        self.best_models = []
        self.val_bleu_curve = []
        self.src_sparse_feat_map = self.config['src_sparse_feat_map'] \
                if self.config['src_sparse_feat_map'] else FlatSparseFeatMap()
        if self.config['trg_sparse_feat_map']:
            logging.fatal("Cannot use bounded vocabulary predictor with "
                          "a target sparse feature map. Ignoring...")
        self.search_algorithm = MyopticSearch(samples=self.nmt_model.samples)
        self.search_algorithm.compile()
Project: sgnmt    Author: ucam-smt
def load_map(self, path):
        """Load a index map file. Mappings should be bijections, but
        there is no sanity check in place to verify this.

        Args:
            path (string): Path to the mapping file

        Returns:
            dict. Mapping from SGNMT index to slave predictor index
        """
        with open(path) as f:
            d = dict(map(int, line.strip().split(None, 1)) for line in f)
            if (d[utils.UNK_ID] != utils.UNK_ID
                    or d[utils.EOS_ID] != utils.EOS_ID
                    or d[utils.GO_ID] != utils.GO_ID):
                logging.fatal(
                   "idxmap %s contains non-identical maps for reserved indices"
                        % path)
            logging.debug("Loaded wmap from %s" % path)
            return [d[idx] if idx in d else 0 for idx in range(max(d)+1)]
Project: sgnmt    Author: ucam-smt
def _get_sentence_indices(range_param, src_sentences):
    """Helper method for ``do_decode`` which returns the indices of the
    sentence to decode

    Args:
        range_param (string): ``--range`` parameter from config
        src_sentences (list):  A list of strings. The strings are the
                               source sentences with word indices to 
                               translate (e.g. '1 123 432 2')
    """
    if args.range:
        try:
            if ":" in args.range:
                from_idx,to_idx = args.range.split(":")
            else:
                from_idx = int(args.range)
                to_idx = from_idx
            return xrange(int(from_idx)-1, int(to_idx))
        except Exception as e:
            logging.fatal("Invalid value for --range: %s" % e)
            return []
    if src_sentences is False:
        logging.fatal("Input method dummy requires --range")
        return []
    return xrange(len(src_sentences))
Project: sgnmt    Author: ucam-smt
def initial_states(self, batch_size, *args, **kwargs):
        """Returns the initial state depending on ``init_strategy``."""
        attended = kwargs['attended']
        if self.init_strategy == 'constant':
            initial_state = [tensor.repeat(self.parameters[2][None, :],
                                           batch_size,
                                           0)]
        elif self.init_strategy == 'last':
            initial_state = self.initial_transformer.apply(
                attended[0, :, -self.attended_dim:])
        elif self.init_strategy == 'average':
            initial_state = self.initial_transformer.apply(
                attended[:, :, -self.attended_dim:].mean(0))  
        else:
            logging.fatal("dec_init parameter %s invalid" % self.init_strategy)
        return initial_state
Project: sgnmt    Author: ucam-smt
def get_nmt_model_path(nmt_model_selector, nmt_config):
    """Get the path to the NMT model according the given NMT config.
    This switches between the most recent checkpoint, the best BLEU 
    checkpoint, or the latest parameters (params.npz). This method
    delegates to ``get_nmt_model_path_*``. This
    method relies on the global ``args`` variable.

    Args:
        nmt_model_selector (string): the ``--nmt_model_selector`` arg
                                     which defines the policy to decide
                                     which NMT model to load (params,
                                     bleu, or time)
        nmt_config (dict):  NMT configuration, see ``get_nmt_config()``

    Returns:
        string. Path to the NMT model file
    """
    if nmt_model_selector == 'params':
        return get_nmt_model_path_params(nmt_config)
    elif nmt_model_selector == 'bleu':
        return get_nmt_model_path_best_bleu(nmt_config)
    elif nmt_model_selector == 'time':
        return get_nmt_model_path_most_recent(nmt_config)
    logging.fatal("NMT model selector %s not available. Please double-check "
                  "the --nmt_model_selector parameter." % nmt_model_selector)
Project: sgnmt    Author: ucam-smt
def tf_get_nmt_predictor(args, nmt_path, nmt_config):
  """Get the TensorFlow NMT predictor.

  Args:
    args (object): SGNMT arguments from ``ArgumentParser``
    nmt_path (string): Path to NMT model or directory
    nmt_config (string): NMT configuration

  Returns:
    Predictor. An instance of ``TensorFlowNMTPredictor``
  """
  if not TENSORFLOW_AVAILABLE:
    logging.fatal("Could not find TensorFlow!")
    return None

  logging.info("Loading tensorflow nmt predictor")
  if os.path.isdir(nmt_path):
    nmt_config['train_dir'] = nmt_path
  elif os.path.isfile(nmt_path):
    nmt_config['model_path'] = nmt_path
  global session
  if not session:
    session = tf.Session()
  return TensorFlowNMTPredictor(args.cache_nmt_posteriors, nmt_config, session)
Project: sgnmt    Author: ucam-smt
def tf_get_rnnlm_predictor(rnnlm_path, rnnlm_config, variable_prefix="model"):
  """Get the TensorFlow RNNLM predictor.

  Args:
    rnnlm_path (string): Path to RNNLM model or directory
    rnnlm_config (string): RNNLM configuration
    variable_prefix (string): prefix of model variables

  Returns:
    Predictor. An instance of ``TensorFlowRNNLMPredictor``
  """
  if not TENSORFLOW_AVAILABLE:
    logging.fatal("Could not find TensorFlow!")
    return None

  logging.info("Loading tensorflow rnnlm predictor")
  return TensorFlowRNNLMPredictor(rnnlm_path, rnnlm_config, variable_prefix)
Project: vpc-router    Author: romana
def run(self, handler):
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw):
                    pass
            self.options['handler_class'] = QuietHandler
        try:
            self.server = make_server(self.host, self.port, handler,
                                      **self.options)
            self.romana_http.wsgi_server_started = True
            logging.info("HTTP server: Started to listen...")
            self.server.serve_forever()
        except socket.error as e:
            logging.fatal("HTTP server: Cannot open socket "
                          "(error %d: %s)... " %
                          (e.errno, e.strerror))
Project: vessel-classification    Author: GlobalFishingWatch
def _parse(x):
    if isinstance(x, datetime.datetime):
        return x
    # 2014-08-28T13:56:16+00:00
    # TODO: fix generation to generate consistent datetimes
    if x[-6:] == '+00:00':
        x = x[:-6]
    if x.endswith('.999999'):
        x = x[:-7]
    if x.endswith('Z'):
        x = x[:-1]
    try:
        dt = datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S')
    except:
        logging.fatal('Could not parse "%s"', x)
        raise
    return dt.replace(tzinfo=pytz.UTC)
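
The suffix stripping above works around strptime()'s fixed format string. On Python 3.7+, a sketch of the same parse for the timestamp shapes shown in the snippet's comments can rely on datetime.fromisoformat(), which accepts the +00:00 offset and fractional seconds directly; only the trailing Z still needs stripping before Python 3.11:

import datetime
import pytz

def parse_ts(x):
    if x.endswith('Z'):
        x = x[:-1]  # fromisoformat() rejects a trailing "Z" before Python 3.11
    dt = datetime.datetime.fromisoformat(x)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=pytz.UTC)
    return dt.astimezone(pytz.UTC)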
Project: mongodb_consistent_backup    Author: Percona-Lab
def open(self):
        if not self._oplog:
            try:
                logging.debug("Opening oplog file %s" % self.oplog_file)
                if self.do_gzip:
                    self._oplog  = GzipFile(self.oplog_file, self.file_mode)
                else:
                    self._oplog = open(self.oplog_file, self.file_mode)
            except Exception, e:
                logging.fatal("Error opening oplog file %s! Error: %s" % (self.oplog_file, e))
                raise OperationError(e)
        return self._oplog
Project: mongodb_consistent_backup    Author: Percona-Lab
def load(self):
        try:
            oplog = self.open()
            logging.debug("Reading oplog file %s" % self.oplog_file)
            for change in decode_file_iter(oplog, CodecOptions(unicode_decode_error_handler="ignore")):
                if 'ts' in change:
                    self._last_ts = change['ts']
                if self._first_ts is None and self._last_ts is not None:
                    self._first_ts = self._last_ts
                self._count += 1
            oplog.close()
        except Exception, e:
            logging.fatal("Error reading oplog file %s! Error: %s" % (self.oplog_file, e))
            raise OperationError(e)
Project: mongodb_consistent_backup    Author: Percona-Lab
def add(self, doc, autoflush=True):
        try:
            self._oplog.write(BSON.encode(doc))
            self._writes_unflushed += 1
            self._count            += 1
            if not self._first_ts:
                self._first_ts = doc['ts']
            self._last_ts = doc['ts']
            if autoflush:
                self.autoflush()
        except Exception, e:
            logging.fatal("Cannot write to oplog file %s! Error: %s" % (self.oplog_file, e))
            raise OperationError(e)
Project: mongodb_consistent_backup    Author: Percona-Lab
def run(self):
        try:
            for mp in self.bucket.get_all_multipart_uploads():
                if mp.id == self.multipart_id:
                    logging.info("Uploading file: %s (part num: %s)" % (self.file_name, self.part_num))
                    with FileChunkIO(self.file_name, 'r', offset=self.offset, bytes=self.byte_count) as fp:
                        mp.upload_part_from_file(fp=fp, part_num=self.part_num)
                    logging.debug("Uploaded file: %s (part num: %s)" % (self.file_name, self.part_num))
                    break
        except Exception, e:
            logging.fatal("AWS S3 multipart upload failed after %i retries! Error: %s" % (self.retries, e))
            sys.exit(1)
Project: mongodb_consistent_backup    Author: Percona-Lab
def restore_balancer_state(self):
        if self._balancer_state_start is not None and not self.restored:
            try:
                logging.info("Restoring balancer state to: %s" % str(self._balancer_state_start))
                self.set_balancer(self._balancer_state_start)
                self.restored = True
            except Exception, e:
                logging.fatal("Failed to set balancer state! Error: %s" % e)
                raise DBOperationError(e)
Project: mongodb_consistent_backup    Author: Percona-Lab
def setup_signal_handlers(self):
        try:
            signal.signal(signal.SIGINT, self.cleanup_and_exit)
            signal.signal(signal.SIGTERM, self.cleanup_and_exit)
        except Exception, e:
            logging.fatal("Cannot setup signal handlers, error: %s" % e)
            sys.exit(1)
Project: mongodb_consistent_backup    Author: Percona-Lab
def get_lock(self):
        # noinspection PyBroadException
        try:
            if not self.config.lock_file:
                self.config.lock_file = '/tmp/%s.lock' % self.program_name
            self.lock = Lock(self.config.lock_file)
        except Exception:
            logging.fatal("Could not acquire lock: '%s'! Is another %s process running? Exiting" % (self.config.lock_file, self.program_name))
            self.logger.compress(True)
            sys.exit(1)
Project: mongodb_consistent_backup    Author: Percona-Lab
def exception(self, error_message, error):
        self.last_error_msg = error_message
        if isinstance(error, NotifyError):
            logging.error(error_message)
        else:
            if isinstance(error, OperationError):
                logging.fatal(error_message)
            else:
                logging.exception(error_message)
            return self.cleanup_and_exit(None, None)
Project: mongodb_consistent_backup    Author: Percona-Lab
def __init__(self, config, db):
        self.config         = config
        self.db             = db
        self.read_pref_tags = self.config.replication.read_pref_tags
        self.max_lag_secs   = self.config.replication.max_lag_secs
        self.min_priority   = self.config.replication.min_priority
        self.max_priority   = self.config.replication.max_priority
        self.hidden_only    = self.config.replication.hidden_only

        self.state_primary   = 1
        self.state_secondary = 2
        self.state_arbiter   = 7
        self.hidden_weight   = 0.20
        self.pri0_weight     = 0.10

        self.replset      = True
        self.rs_config    = None
        self.rs_status    = None
        self.primary      = None
        self.secondary    = None
        self.mongo_config = None

        self.replset_summary = {}

        # Get a DB connection
        try:
            if isinstance(self.db, DB):
                self.connection = self.db.connection()
            else:
                raise Error("'db' field is not an instance of class: 'DB'!")
        except Exception, e:
            logging.fatal("Could not get DB connection! Error: %s" % e)
            raise OperationError(e)
Project: mongodb_consistent_backup    Author: Percona-Lab
def auth_if_required(self):
        if self.username is not None and self.password is not None:
            try:
                logging.debug("Authenticating connection with username: %s" % self.username)
                self._conn[self.authdb].authenticate(self.username, self.password)
            except OperationFailure, e:
                logging.fatal("Unable to authenticate with host %s: %s" % (self.uri, e))
                raise DBAuthenticationError(e)
        else:
            pass
Project: mongodb_consistent_backup    Author: Percona-Lab
def run(self):
        if os.path.isdir(self.backup_dir):
            if not os.path.isfile(self.output_file):
                try:
                    backup_base_dir  = os.path.dirname(self.backup_dir)
                    backup_base_name = os.path.basename(self.backup_dir)

                    log_msg   = "Archiving directory: %s" % self.backup_dir
                    cmd_flags = ["-C", backup_base_dir, "-cf", self.output_file, "--remove-files", backup_base_name]

                    if self.do_gzip():
                        log_msg   = "Archiving and compressing directory: %s" % self.backup_dir
                        cmd_flags = ["-C", backup_base_dir, "-czf", self.output_file, "--remove-files", backup_base_name]

                    logging.info(log_msg)
                    self.running  = True
                    self._command = LocalCommand(self.binary, cmd_flags, self.verbose)
                    self.exit_code = self._command.run()
                except Exception, e:
                    logging.fatal("Failed archiving file: %s! Error: %s" % (self.output_file, e))
                finally:
                    self.running   = False
                    self.stopped   = True
                    self.completed = True
            else:
                logging.fatal("Output file: %s already exists!" % self.output_file)
            return self.backup_dir
Project: mongodb_consistent_backup    Author: Percona-Lab
def run(self):
        try:
            thread_count = self.threads()
            self._pool   = Pool(processes=thread_count)
            logging.info("Archiving backup directories with pool of %i thread(s)" % thread_count)
        except Exception, e:
            logging.fatal("Could not start pool! Error: %s" % e)
            raise Error(e)

        if os.path.isdir(self.backup_dir):
            try:
                self.running = True
                for backup_dir in os.listdir(self.backup_dir):
                    subdir_name = os.path.join(self.backup_dir, backup_dir)
                    if not os.path.isdir(os.path.join(subdir_name, "dump")):
                        continue
                    output_file = "%s.tar" % subdir_name
                    if self.do_gzip():
                        output_file  = "%s.tgz" % subdir_name
                    self._pool.apply_async(TarThread(subdir_name, output_file, self.compression(), self.verbose, self.binary).run, callback=self.done)
                    self._pooled.append(subdir_name)
            except Exception, e:
                self._pool.terminate()
                logging.fatal("Could not create tar archiving thread! Error: %s" % e)
                raise Error(e)
            finally:
                self.wait()
                self.completed = True
Project: tagberry    Author: csailer
def fatal(self,message,exception):
        self._logWriter(logging.FATAL,message,exception)
Project: geomdn    Author: afshinrahimi
def get_symb_mus(self, mus, sigmas, corxy, pis, prediction_method="pi"):
        """
        Can be used to train an autoencoder that, given a location,
        trains a mixture density layer and then outputs the same
        location.
        Symbolically predicts the mu that maximizes the mixture model,
        either from the full mixture probability or from the component
        with the highest pi; see pred_sharedparams.
        """
        if prediction_method == "mixture":
            """
            sigmainvs = 1.0 / sigmas
            sigmainvprods = sigmainvs[:,:, 0] * sigmainvs[:,:, 1]
            sigmas2 = sigmas ** 2
            corxy2 = corxy **2
            diff2 = diff ** 2
            diffsigma = diff2 / sigmas2
            diffsigmanorm = np.sum(diffsigma, axis=-1)
            z = diffsigmanorm - 2 * corxy * diffprod * sigmainvprods
            oneminuscorxy2inv = 1.0 / (1.0 - corxy2)
            expterm = np.exp(-0.5 * z * oneminuscorxy2inv)
            expterm = 1.0
            probs = (0.5 / np.pi) * sigmainvprods * T.sqrt(oneminuscorxy2inv) * expterm
            probs = pis * probs
            """
            logging.fatal("not implemented!")
            sys.exit()
        elif prediction_method == "pi":    
            preds = T.argmax(pis, axis=1)
            selected_mus = mus[T.arange(mus.shape[0]), preds, :]
            return selected_mus
Project: geomdn    Author: afshinrahimi
def get_symb_mus(self, mus, sigmas, corxy, pis, prediction_method="pi"):
        """
        Can be used to train an autoencoder that, given a location,
        trains a mixture density layer and then outputs the same
        location.
        Symbolically predicts the mu that maximizes the mixture model,
        either from the full mixture probability or from the component
        with the highest pi; see pred_sharedparams.
        """
        if prediction_method == "mixture":
            #sigmainvs = 1.0 / sigmas
            #sigmainvprods = sigmainvs[:,:, 0] * sigmainvs[:,:, 1]
            #sigmas2 = sigmas ** 2
            #corxy2 = corxy **2
            #diff2 = diff ** 2
            #diffsigma = diff2 / sigmas2
            #diffsigmanorm = np.sum(diffsigma, axis=-1)
            #z = diffsigmanorm - 2 * corxy * diffprod * sigmainvprods
            #oneminuscorxy2inv = 1.0 / (1.0 - corxy2)
            #expterm = np.exp(-0.5 * z * oneminuscorxy2inv)
            #expterm = 1.0
            #probs = (0.5 / np.pi) * sigmainvprods * T.sqrt(oneminuscorxy2inv) * expterm
            #probs = pis * probs
            logging.fatal("not implemented!")
            sys.exit()
        elif prediction_method == "pi":
            preds = T.argmax(pis, axis=1)
            selected_mus = mus[T.arange(mus.shape[0]),preds,:]
            return selected_mus
Project: stratosphere    Author: victortrac
def wait_for_completion(project, result):
    print('Waiting for deployment {}...'.format(result['name']))
    last_event = result
    while not last_event['status'] in ['DONE', ]:
        time.sleep(1)
        last_event = dm.operations().get(project=project, operation=last_event['name']).execute()
        logger.info('Operation: {name}, TargetLink: {targetLink}, Progress: {progress}, Status: {status}'
                    .format(**last_event))
    if len(last_event.get('error', [])):
        logging.error('*** Stack apply failed! ***')
        logging.fatal(pprint.pformat(last_event))  # pformat returns the string; pprint would log None
        sys.exit(1)
    else:
        print('Stack action complete.')
Project: m16c-interface    Author: q3k
def dump(args, s):
    # Run target clock at 6MHz.
    s.adapter.set_tclk(0)
    # Run target serial clock at 1.5MHz
    s.adapter.set_sclk(127)

    try:
        code = args.code.decode('hex')
    except TypeError:
        logging.fatal("Code must be in hexadecimal format.")
        return
    if len(code) != 7:
        logging.fatal("Code must be 7 bytes long.")
        return

    s.unlock(code)
    status = s.unlock_status()
    if status != serialio.UNLOCK_SUCCESSFUL:
        logging.fatal("Target did not unlock.")
        return
    logging.info("Target unlocked.")

    start = 0x0e00
    end = 0x0fff

    with open(args.output, 'w') as f:
        logging.info("Writing pages {:x}-{:x} to {}...".format(start, end,
                                                               args.output))
        for page in range(start, end+1):
            logging.debug("Dumping {:x}00-{:x}ff...".format(page, page))
            data = s.read_page(page)
            f.write(data)
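
Aside: args.code.decode('hex') in the snippet above only works on Python 2 byte strings. On Python 3 the equivalent parse-and-validate step would look roughly like this (variable names kept from the snippet):

try:
    code = bytes.fromhex(args.code)  # Python 3 replacement for str.decode('hex')
except ValueError:
    # bytes.fromhex raises ValueError (not TypeError) on malformed input
    logging.fatal("Code must be in hexadecimal format.")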
Project: idascripts    Author: ctfhacker
def noapi(*args):
    fr = sys._getframe().f_back
    if fr is None:
        logging.fatal("{:s}.noapi : Unexpected empty frame from caller. Continuing.. : {!r} : {!r}".format('.'.join(("internal",__name__)), sys._getframe(), sys._getframe().f_code))
        return hook.CONTINUE

    return internal.interface.priorityhook.CONTINUE if fr.f_back is None else internal.interface.priorityhook.STOP
Project: idascripts    Author: ctfhacker
def changing(cls, ea, repeatable_cmt, newcmt):
        oldcmt = idaapi.get_cmt(ea, repeatable_cmt)
        try: cls.event.send((ea, bool(repeatable_cmt), newcmt))
        except StopIteration, e:
            logging.fatal("{:s}.changing : Unexpected termination of event handler. Re-instantiating it.".format('.'.join((__name__,cls.__name__))))
            cls.event = cls._event(); next(cls.event)
Project: idascripts    Author: ctfhacker
def changed(cls, ea, repeatable_cmt):
        newcmt = idaapi.get_cmt(ea, repeatable_cmt)
        try: cls.event.send((ea, bool(repeatable_cmt), None))
        except StopIteration, e:
            logging.fatal("{:s}.changed : Unexpected termination of event handler. Re-instantiating it.".format('.'.join((__name__,cls.__name__))))
            cls.event = cls._event(); next(cls.event)
Project: idascripts    Author: ctfhacker
def changed(cls, cb, a, cmt, repeatable):
        fn = idaapi.get_func(a.startEA)
        newcmt = idaapi.get_func_cmt(fn, repeatable)
        try: cls.event.send((fn.startEA, bool(repeatable), None))
        except StopIteration, e:
            logging.fatal("{:s}.changed : Unexpected termination of event handler. Re-instantiating it.".format('.'.join((__name__,cls.__name__))))
            cls.event = cls._event(); next(cls.event)

### database scope
Project: idascripts    Author: ctfhacker
def __rebase_function(old, new, size, iterable):
    key = internal.comment.tagging.__address__
    failure, total = [], list(iterable)

    for i, fn in enumerate(total):
        # grab the contents dictionary
        try:
            state = internal.comment.contents._read(None, fn)
        except LookupError:
            logging.fatal("{:s}.rebase : Address {:x} -> {:x} is not a function : {:x} -> {:x}".format(__name__, fn - new + old, fn, old, new))
            state = None
        if state is None: continue

        # now we can erase the old one
        res = fn - new + old
        internal.comment.contents._write(res, None, None)

        # update the addresses
        res, state[key] = state[key], {ea - old + new : ref for ea,ref in state[key].iteritems()}

        # and put the new addresses back
        ok = internal.comment.contents._write(None, fn, state)
        if not ok:
            logging.fatal("{:s}.rebase : Failure trying to write refcount for {:x} : {!r} : {!r}".format(__name__, fn, res, state[key]))
            failure.append((fn, res, state[key]))

        yield i, fn
    return
Project: idascripts    Author: ctfhacker
def prevreg(cls, ea, reg, *regs, **modifiers):
        regs = (reg,) + regs
        count = modifiers.get('count', 1)
        args = ', '.join(["{:x}".format(ea)] + __builtin__.map("\"{:s}\"".format, regs) + __builtin__.map(utils.unbox("{:s}={!r}".format), modifiers.items()))

        # generate each helper using the regmatch class
        iterops = interface.regmatch.modifier(**modifiers)
        uses_register = interface.regmatch.use(regs)

        # if within a function, then sure we're within the chunk's bounds.
        if function.within(ea):
            (start, _) = function.chunk(ea)
            fwithin = functools.partial(operator.le, start)

        # otherwise ensure that we're not in the function and we're a code type.
        else:
            fwithin = utils.compose(utils.fap(utils.compose(function.within, operator.not_), type.is_code), all)

            start = cls.walk(ea, cls.prev, fwithin)
            start = top() if start == idaapi.BADADDR else start

        # define a function for cls.walk to continue looping while
        F = lambda ea: fwithin(ea) and not any(uses_register(ea, opnum) for opnum in iterops(ea))

        # skip the current address
        prevea = cls.prev(ea)
        if prevea is None:
            # FIXME: include registers in message
            logging.fatal("{:s}.prevreg({:s}, ...) : Unable to start walking from previous address. : {:x}".format('.'.join((__name__, cls.__name__)), args, ea))
            return ea

        # now walk while none of our registers match
        res = cls.walk(prevea, cls.prev, F)
        if res == idaapi.BADADDR or (cls == address and res < start):
            # FIXME: include registers in message
            raise ValueError("{:s}.prevreg({:s}, ...) : Unable to find register{:s} within chunk. {:x}:{:x} : {:x}".format('.'.join((__name__, cls.__name__)), args, ('s','')[len(regs)>1], start, ea, res))

        # recurse if the user specified it
        modifiers['count'] = count - 1
        return cls.prevreg( cls.prev(res), *regs, **modifiers) if count > 1 else res
Project: idascripts    Author: ctfhacker
def nextreg(cls, ea, reg, *regs, **modifiers):
        regs = (reg,) + regs
        count = modifiers.get('count',1)
        args = ', '.join(["{:x}".format(ea)] + __builtin__.map("\"{:s}\"".format, regs) + __builtin__.map(utils.unbox("{:s}={!r}".format), modifiers.items()))

        # generate each helper using the regmatch class
        iterops = interface.regmatch.modifier(**modifiers)
        uses_register = interface.regmatch.use(regs)

        # if within a function, ensure we're within the chunk's bounds.
        if function.within(ea):
            (_,end) = function.chunk(ea)
            fwithin = functools.partial(operator.gt, end)

        # otherwise ensure that we're not in a function and we're a code type.
        else:
            fwithin = utils.compose(utils.fap(utils.compose(function.within, operator.not_), type.is_code), all)

            end = cls.walk(ea, cls.next, fwithin)
            end = bottom() if end == idaapi.BADADDR else end

        # define a function for cls.walk to continue looping while
        F = lambda ea: fwithin(ea) and not any(uses_register(ea, opnum) for opnum in iterops(ea))

        # skip the current address
        nextea = cls.next(ea)
        if nextea is None:
            # FIXME: include registers in message
            logging.fatal("{:s}.nextreg({:s}) : Unable to start walking from next address. : {:x}".format('.'.join((__name__, cls.__name__)), args, ea))
            return ea

        # now walk while none of our registers match
        res = cls.walk(nextea, cls.next, F)
        if res == idaapi.BADADDR or (cls == address and res >= end):
            # FIXME: include registers in message
            raise ValueError("{:s}.nextreg({:s}, ...) : Unable to find register{:s} within chunk {:x}:{:x} : {:x}".format('.'.join((__name__, cls.__name__)), args, ('s','')[len(regs)>1], end, ea, res))

        # recurse if the user specified it
        modifiers['count'] = count - 1
        return cls.nextreg(cls.next(res), *regs, **modifiers) if count > 1 else res
Project: idascripts    Author: ctfhacker
def next(cls, ea, count):
        ea = interface.address.within(ea)
        isStop = lambda ea: _instruction.feature(ea) & idaapi.CF_STOP == idaapi.CF_STOP
        invalidQ = utils.compose(utils.fap(utils.compose(type.is_code, operator.not_), isStop), any)
        refs = filter(type.is_code, xref.down(ea))
        if len(refs) > 1:
            logging.fatal("{:s}.next({:x}, count={:d}) : Unable to determine next address due to multiple xrefs being available : {:s}".format('.'.join((__name__, cls.__name__)), ea, count, ', '.join(__builtin__.map("{:x}".format,refs))))
            return None
        if invalidQ(ea) and not _instruction.is_jmp(ea):
#            logging.fatal("{:s}.next({:x}, count={:d}) : Unable to move to next address. Flow has stopped.".format('.'.join((__name__, cls.__name__)), ea, count))
            return None
        res = refs[0] if _instruction.is_jmp(ea) else address.next(ea)
        return cls.next(res, count-1) if count > 1 else res
Project: idascripts    Author: ctfhacker
def start(self):
        '''Start to dispatch callables in the execution queue.'''
        cls = self.__class__
        if not self.thread.is_alive():
            logging.fatal("{:s}.start : Unable to resume an already terminated execution queue. : {!r}".format('.'.join(('internal',__name__,cls.__name__)), self))
            return False
        logging.info("{:s}.start : Resuming execution queue. : {!r}".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
        res, _ = self.ev_unpaused.is_set(), self.ev_unpaused.set()
        self.queue.acquire()
        self.queue.notify_all()
        self.queue.release()
        return not res