Python logging module: warning() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use logging.warning().
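
A minimal, self-contained sketch of the call itself before the extracted examples (the format string and values here are my own):

import logging

# Configure the root logger once; WARNING is already the default threshold.
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')

# Lazy %-style arguments: interpolation happens only if the record is emitted.
logging.warning("Could not open %s (retry %d)", "config.yaml", 2)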

Project: electrum-martexcoin-server    Author: martexcoin
def __init__(self, addr, requestHandler=StratumJSONRPCRequestHandler,
                 logRequests=False, encoding=None, bind_and_activate=True,
                 address_family=socket.AF_INET):
        self.logRequests = logRequests
        StratumJSONRPCDispatcher.__init__(self, encoding)
        # TCPServer.__init__ has an extra parameter on 2.6+, so
        # check Python version and decide on how to call it
        vi = sys.version_info
        self.address_family = address_family
        if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
            # Unix sockets can't be bound if they already exist in the
            # filesystem. The convention of e.g. X11 is to unlink
            # before binding again.
            if os.path.exists(addr):
                try:
                    os.unlink(addr)
                except OSError:
                    logging.warning("Could not unlink socket %s", addr)

        SocketServer.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
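
Note that the snippet above passes addr as a lazy %-style argument rather than pre-formatting the message; several later examples build the string eagerly with str.format(). A minimal comparison, using a placeholder value of my own:

import logging

addr = "/tmp/stratum.sock"  # hypothetical value for illustration
# Deferred: the message is only interpolated if the record is emitted.
logging.warning("Could not unlink socket %s", addr)
# Eager: the string is always built, even when the record is filtered out.
logging.warning("Could not unlink socket {}".format(addr))
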
Project: YellowFin_Pytorch    Author: JianGoForIt
def update_hyper_param(self):
    for group in self._optimizer.param_groups:
      group['momentum'] = self._mu_t
      #group['momentum'] = max(self._mu, self._mu_t)
      if not self._force_non_inc_step:
        group['lr'] = self._lr_t * self._lr_factor
        # a loose clamping to prevent a catastrophically large move. If the move
        # is too large, we set lr to 0 and only use the momentum to move
        if self._adapt_clip and (group['lr'] * np.sqrt(self._global_state['grad_norm_squared']) >= self._catastrophic_move_thresh):
          group['lr'] = self._catastrophic_move_thresh / np.sqrt(self._global_state['grad_norm_squared'] + eps)
          if self._verbose:
            logging.warning("clip catastrophic move!")
      elif self._iter > self._curv_win_width:
        # force to guarantee lr * grad_norm not increasing dramatically. 
        # Not necessary for basic use. Please refer to the comments
        # in YFOptimizer.__init__ for more details
        self.lr_grad_norm_avg()
        debias_factor = self.zero_debias_factor()
        group['lr'] = min(self._lr * self._lr_factor,
          2.0 * self._global_state["lr_grad_norm_avg_min"] \
          / (np.sqrt(np.exp(self._global_state['grad_norm_squared_avg_log'] / debias_factor) ) + eps) )
    return
Project: kas    Author: siemens
def _read_stream(stream, callback):
    """
        This asynchronous method reads from the output stream of the
        application and transfers each line to the callback function.
    """
    while True:
        line = yield from stream.readline()
        try:
            line = line.decode('utf-8')
        except UnicodeDecodeError as err:
            logging.warning('Could not decode line from stream, ignore it: %s',
                            err)
            continue  # actually skip the undecodable line, as the message says
        if line:
            callback(line)
        else:
            break
Project: kas    Author: siemens
def ssh_setup_agent(config, envkeys=None):
    """
        Starts the ssh-agent
    """
    envkeys = envkeys or ['SSH_PRIVATE_KEY']
    output = os.popen('ssh-agent -s').readlines()
    for line in output:
        matches = re.search(r"(\S+)\=(\S+)\;", line)
        if matches:
            config.environ[matches.group(1)] = matches.group(2)

    for envkey in envkeys:
        key = os.environ.get(envkey)
        if key:
            ssh_add_key(config.environ, key)
        else:
            logging.warning('%s is missing', envkey)
Project: kas    Author: siemens
def setup_environ(self):
        """
            Sets the environment variables for process that are
            started by kas.
        """
        distro_base = get_distro_id_base().lower()
        if distro_base in ['fedora', 'suse', 'opensuse']:
            self.environ = {'LC_ALL': 'en_US.utf8',
                            'LANG': 'en_US.utf8',
                            'LANGUAGE': 'en_US'}
        elif distro_base in ['debian', 'ubuntu']:
            self.environ = {'LC_ALL': 'en_US.UTF-8',
                            'LANG': 'en_US.UTF-8',
                            'LANGUAGE': 'en_US:en'}
        else:
            logging.warning('kas: "%s" is not a supported distro. '
                            'No default locales set.', distro_base)
            self.environ = {}
Project: core-framework    Author: RedhawkSDR
def getPort(self, name):
        """The default behavior of getPort() will automatically
        return ports as defined by 'usesport' and 'providesport'
        static class attributes."""
        self._log.trace("getPort(%s)", name)
        try:
            portdef = self.__ports[name]
        except KeyError:
            self._log.warning("getPort() could not find port %s", name)
            raise CF.PortSupplier.UnknownPort()
        else:
            portobj = portdef.__get__(self)
            if portobj is None:
                self._log.warning("component did not implement port %s", name)
                raise CF.PortSupplier.UnknownPort()
            port = portobj._this()
            if not portdef.isValid(port):
                self._log.warning("getPort() for %s did not match required repid", name)
            self._log.trace("getPort() --> %s", port)
            return port
Project: core-framework    Author: RedhawkSDR
def GetSCAFileContents(url):
    fileContents = None
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    if scheme == "sca":
        queryAsDict = dict([x.split("=") for x in query.split("&")])
        try:
            orb = CORBA.ORB_init()
            fileSys = orb.string_to_object(queryAsDict["fs"])
        except KeyError:
            logging.warning("sca URI missing fs query parameter")
        except Exception:
            logging.warning("Unable to get ORB reference")
        else:
            if fileSys is None:
                logging.warning("Failed to lookup file system")
            else:
                scaFile = fileSys.open(path, True)
                try:
                    fileSize = scaFile.sizeOf()
                    fileContents = scaFile.read(fileSize)
                finally:
                    scaFile.close()
    return fileContents
Project: PyPlanet    Author: PyPlanet
async def handle_payload(self, handle_nr, method=None, data=None, fault=None):
        """
        Handle a callback/response payload or fault.

        :param handle_nr: Handler ID
        :param method: Method name
        :param data: Parsed payload data.
        :param fault: Fault object.
        """
        if handle_nr in self.handlers:
            await self.handle_response(handle_nr, method, data, fault)
        elif method and data is not None:
            if method == 'ManiaPlanet.ModeScriptCallbackArray':
                await self.handle_scripted(handle_nr, method, data)
            elif method == 'ManiaPlanet.ModeScriptCallback':
                await self.handle_scripted(handle_nr, method, data)
            else:
                await self.handle_callback(handle_nr, method, data)
        elif fault is not None:
            raise TransportException('Handle payload got invalid parameters, see fault exception! {}'.format(fault)) from fault
        else:
            print(method, handle_nr, data)
            logging.warning('Received gbx data, but handle wasn\'t known or payload invalid: handle_nr: {}, method: {}'.format(
                handle_nr, method,
            ))
Project: PyPlanet    Author: PyPlanet
def finish_reservations(self):  # pragma: no cover
        """
        The method will copy all reservations to the actual signals. (PRIVATE)
        """
        for sig_name, recs in self.reserved.items():
            for func, kwargs in recs:
                try:
                    signal = self.get_signal(sig_name)
                    signal.connect(func, **kwargs)
                except Exception as e:
                    logging.warning('Signal not found: {}, {}'.format(
                        sig_name, e
                    ), exc_info=sys.exc_info())

        for sig_name, recs in self.reserved_self.items():
            for func, slf in recs:
                try:
                    signal = self.get_signal(sig_name)
                    signal.set_self(func, slf)
                except Exception as e:
                    logging.warning(str(e), exc_info=sys.exc_info())

        self.reserved = dict()
        self.reserved_self = dict()
Project: PyPlanet    Author: PyPlanet
async def load_matchsettings(self, filename):
        """
        Load Match Settings file and insert it into the current map playlist.

        :param filename: File to load, relative to Maps folder.
        :return: Boolean if loaded.
        """
        try:
            if not await self._instance.storage.driver.exists(
                os.path.join(self._instance.storage.MAP_FOLDER, filename)
            ):
                raise MapException('Can\'t find match settings file. Does it exist?')
            else:
                await self._instance.gbx('LoadMatchSettings', filename)
        except Exception as e:
            logging.warning('Can\'t load match settings!')
            raise MapException('Can\'t load matchsettings according the dedicated server, tried loading from \'{}\'!'.format(filename)) from e
Project: django_pipedrive    Author: MasAval
def index(request):

    enable_hstore()

    try:
        if request.method == 'POST':

            json_data = json.loads(request.body)
            meta = json_data[u'meta']

            # API v1
            if meta[u'v'] == 1:
                return handle_v1(json_data)
            else:
                raise NonImplementedVersionException()

    except IntegrityError as e:
        logging.warning(e.message)
        logging.warning("Forcing full sync from pipedrive")
        PipedriveModel.sync_from_pipedrive()

    return HttpResponse("Hello, world!")
Project: django_pipedrive    Author: MasAval
def sync_one(cls, external_id, last_error=None):
        post_data = cls.pipedrive_api_client.get_instance(external_id)

        # Error code from the API
        if not post_data[u'success']:
            logging.error(post_data)
            raise UnableToSyncException(cls, external_id)
        try:
            return cls.update_or_create_entity_from_api_post(post_data[u'data'])
        except IntegrityError as e:
            logging.warning(e)
            if e.message == last_error:
                raise SameErrorTwiceSyncException(cls, external_id, e.message)
            match = re.search('.*Key \((.*)\)=\((.*)\).*', e.message)
            if match:
                field_name = match.group(1)
                field_id = match.group(2)
                model = cls.field_model_map(field_name)
                model.sync_one(field_id)
                return cls.sync_one(external_id, e.message)
            else:
                raise Exception("Could not handle error message")
Project: zipline-chinese    Author: zhanghan1990
def _create_daily_stats(self, perfs):
        # create daily and cumulative stats dataframe
        daily_perfs = []
        # TODO: the loop here could overwrite expected properties
        # of daily_perf. Could potentially raise or log a
        # warning.
        for perf in perfs:
            if 'daily_perf' in perf:

                perf['daily_perf'].update(
                    perf['daily_perf'].pop('recorded_vars')
                )
                perf['daily_perf'].update(perf['cumulative_risk_metrics'])
                daily_perfs.append(perf['daily_perf'])
            else:
                self.risk_report = perf

        daily_dts = [np.datetime64(perf['period_close'], utc=True)
                     for perf in daily_perfs]
        daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)

        return daily_stats
Project: cellranger    Author: 10XGenomics
def rows_are_valid_csv(rows):
    """
    Determine whether the rows comprise a readable simple CSV,
    with a lane number, sample and index (in that order)
    :type rows: list[list[string]]
    :rtype: bool
    """
    if not rows:
        return False

    if row_is_simple_header(rows[0]):
        data_idx = 1
    else:
        data_idx = 0

    pop_rows = [row for row in rows[data_idx:] if row]
    tuples = [row_is_simple_data(row) for row in pop_rows]
    for tup in tuples:
        if tup[1]:
            logging.warning(tup[1])

    return all([tup[0] for tup in tuples])
Project: cellranger    Author: 10XGenomics
def rows_are_iem_samplesheet(rows):
    """
    Determine whether the rows comprise an Illumina Experiment Manager (IEM)
    sample sheet by checking for the presence of a [Data] section with
    sample header.

    :type rows: list[list[string]]
    :rtype: bool
    """
    # criteria: has to have [Data] section with recognized sample index.
    section_gen = rows_iem_section_generator(rows)
    for section in section_gen:
        if section_is_valid_data(section):
            if not iem_rows_all_have_sample_id(section.rows):
                logging.warning("Blank Sample_ID entries detected in data section")
                return False
            else:
                return True
    return False
Project: aws-greengrass-mini-fulfillment    Author: awslabs
def _create_lambda(arn, func_name, func_desc, lambda_handler, lambda_main,
                   runtime):
    func = dict()
    lamb = boto3.client('lambda')
    with open(temp_deploy_zip, 'rb') as deploy:  # zip payload must be read as bytes
        func['ZipFile'] = deploy.read()
    try:
        resp = lamb.create_function(
            FunctionName=func_name, Runtime=runtime, Publish=True,
            Description=func_desc,
            Role=arn, Code=func, Handler='{0}.{1}'.format(
                lambda_main, lambda_handler
            ))
        logging.info("Create Lambda Function resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating function '{1}'.".format(
                ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
Project: aws-greengrass-mini-fulfillment    Author: awslabs
def _create_function_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')

    try:
        resp = lamb.create_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        logging.info("Create Lambda Alias resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True))
        )
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating alias '{1}'.".format(
                ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
Project: aws-greengrass-mini-fulfillment    Author: awslabs
def _update_lambda_function(zip_file, func_name):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_function_code(
            FunctionName=func_name,
            ZipFile=zip_file.read(),
            Publish=True
        )
        return resp['Version']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning(
                "Validation Error {0} updating function '{1}'.".format(
                    ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
Project: aws-greengrass-mini-fulfillment    Author: awslabs
def _update_lambda_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        return resp['AliasArn']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning(
                "Validation Error {0} updating alias '{1}'.".format(
                    ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
Project: bnn-analysis    Author: myshkov
def sample_posterior(self, session=None, return_stats=False, **kwargs):
        """
        Returns a new sample from the posterior distribution of the parameters.
        :param return_stats: Whether to return sampling process statistics
        :return: the generated sample
        """

        # make a number of tries to draw a sample
        for i in range(self.draw_retries_num):
            sample, stats = self._sample_posterior(session=session, return_stats=return_stats, **kwargs)
            if sample is not None:
                break

        if sample is not None:
            self.sample_number += 1
        else:
            logging.warning('Impossible to draw a sample with the specified parameters.')

        if return_stats:
            return sample, stats

        return sample
Project: pg-wikipedia    Author: gitenberg-dev
def get_item_summary(wd_id, lang='en'):
    if wd_id is None:
        return None
    try:
        r = requests.get(u'https://www.wikidata.org/wiki/Special:EntityData/{}.json'.format(wd_id))
    except:
        logging.warning( u"couldn't get https://www.wikidata.org/wiki/Special:EntityData/{}.json".format(wd_id))
        return ""
    try:
        title = r.json()['entities'][wd_id]['sitelinks']['{}wiki'.format(lang)]['title']
        try:
            return wikipedia.summary(title)
        except (PageError,WikipediaException,DisambiguationError):
            logging.warning(u"couldn't get wikipedia.summary({})".format(title))
            return ''
    except ValueError:
        #not JSON
        return ""
Project: rpi-can-logger    Author: JonnoFTW
def _make_csv_writer(self):
        """

        :return:
        """
        self._buffer = StringIO()

        self._bytes_written = 0
        now = datetime.now()
        self._out_csv = open(self.log_folder + '/' + now.strftime('%Y%m%d_%H%M%S_{}.csv'.format(self.make_random(6))), 'w')
        logging.warning("Writing to {} ({} bytes)".format(self._out_csv.name, self.max_bytes))
        self._out_writer = csv.DictWriter(self._buffer, fieldnames=self.fieldnames, restval=None)
        self._out_writer.writeheader()
        self._out_csv.write(self._buffer.getvalue())
        self._reset_buffer()
        self.writerow({'vid': self.vin})
Project: rpi-can-logger    Author: JonnoFTW
def _make_writer(self):
        """

        :return:
        """
        self._buffer = StringIO()

        self._bytes_written = 0
        now = datetime.now()
        self.fname = self.log_folder + '/' + now.strftime('%Y%m%d_%H%M%S_{}.json'.format(self.make_random(6)))
        self.fname = str(pathlib.Path(self.fname))
        self._out_fh = open(self.fname, 'w')
        self.write_pid()
        logging.warning("Writing to  {} ({} bytes)".format(self._out_fh.name, self.max_bytes))

        # compress any old files still lying around
        for fname in glob(self.log_folder+"/*.json"):
            if fname != self.fname:
                self._compress(fname)
Project: cxflow-tensorflow    Author: Cognexa
def _create_model(self, **kwargs) -> None:
        """
        Create your TensorFlow model.

        Every model has to define:

        - loss tensor named according to given ``loss_name``
        - input placeholders and output tensors named according to the specified input and output names

        .. warning::
            To support multi-GPU training, all the variables must be created with ``tf.get_variable``
            and appropriate variable scopes.

        :param kwargs: model configuration as specified in ``model`` section of the configuration file
        """
        raise NotImplementedError('`_create_model` method must be implemented in order to construct a new model.')
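
The docstring's warning refers to TF1-style variable sharing: variables created through tf.get_variable inside a variable scope can be reused across GPU towers, while plain tf.Variable instances cannot. A minimal sketch of what a conforming _create_model body might contain (scope name, shape, and initializer are my own):

import tensorflow as tf

# reuse=tf.AUTO_REUSE lets a second GPU tower pick up the same variable
# instead of creating a duplicate copy.
with tf.variable_scope('dense', reuse=tf.AUTO_REUSE):
    weights = tf.get_variable('w', shape=[128, 10],
                              initializer=tf.glorot_uniform_initializer())
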
Project: sat6_scripts    Author: RedHatSatellite
def log_msg(msg, level):
    """Write message to logfile"""

    # If we are NOT in debug mode, only write non-debug messages to the log
    if level == 'DEBUG':
        if DEBUG:
            logging.debug(msg)
            print BOLD + "DEBUG: " + msg + ENDC
    elif level == 'ERROR':
        logging.error(msg)
        tf.write('ERROR:' + msg + '\n')
        print ERROR + "ERROR: " + msg + ENDC
    elif level == 'WARNING':
        logging.warning(msg)
        tf.write('WARNING:' + msg + '\n')
        print WARNING + "WARNING: " + msg + ENDC
    # Otherwise if we ARE in debug, write everything to the log AND stdout
    else:
        logging.info(msg)
        tf.write(msg + '\n')
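
A hypothetical call, assuming the DEBUG flag, the color constants (BOLD, WARNING, ERROR, ENDC), and the tf log-file handle are defined as elsewhere in this Python 2 script:

log_msg("Repository sync completed with errors", 'WARNING')
# logged via logging.warning, appended to tf, and echoed in color to stdout
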
Project: gransk    Author: pcbje
def load_all(self, config):
    """
    Load all existing data.

    :param config: Configuration object.
    :type config: ``dict``
    """
    self.buckets = {}

    for path in glob.glob(os.path.join(
            config[helper.DATA_ROOT], '%s_buckets-*.pickle' % self.NAME)):
      with open(path, 'rb') as inp:
        try:
          for key, value in pickle.load(inp).items():
            if key in self.buckets:
              self.buckets[key]['bins'].update(value['bins'])
            else:
              self.buckets[key] = value
        except:
          logging.warning('could not load related_%s data', self.NAME)
Project: oscars2016    Author: 0x0ece
def __init__(self, max_age):
      """Constructor.

      Args:
        max_age: Cache expiration in seconds.
      """
      self._max_age = max_age
      self._file = os.path.join(tempfile.gettempdir(), FILENAME)
      f = LockedFile(self._file, 'a+', 'r')
      try:
        f.open_and_lock()
        if f.is_locked():
          _read_or_initialize_cache(f)
        # If we can not obtain the lock, other process or thread must
        # have initialized the file.
      except Exception as e:
        logging.warning(e, exc_info=True)
      finally:
        f.unlock_and_close()
Project: oscars2016    Author: 0x0ece
def set(self, url, content):
    f = LockedFile(self._file, 'r+', 'r')
    try:
      f.open_and_lock()
      if f.is_locked():
        cache = _read_or_initialize_cache(f)
        cache[url] = (content, _to_timestamp(datetime.datetime.now()))
        # Remove stale cache.
        for k, (_, timestamp) in list(cache.items()):
          if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:
            del cache[k]
        f.file_handle().truncate(0)
        f.file_handle().seek(0)
        json.dump(cache, f.file_handle())
      else:
        logger.debug('Could not obtain a lock for the cache file.')
    except Exception as e:
      logger.warning(e, exc_info=True)
    finally:
      f.unlock_and_close()
Project: oscars2016    Author: 0x0ece
def autodetect():
  """Detects an appropriate cache module and returns it.

  Returns:
    googleapiclient.discovery_cache.base.Cache, a cache object which
    is auto detected, or None if no cache object is available.
  """
  try:
    from google.appengine.api import memcache
    from . import appengine_memcache
    return appengine_memcache.cache
  except Exception:
    try:
      from . import file_cache
      return file_cache.cache
    except Exception as e:
      logging.warning(e, exc_info=True)
      return None
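
The cache snippets above pass exc_info=True, so the warning record carries the full traceback of the swallowed exception rather than just its message. A minimal sketch:

import logging

try:
    raise IOError("cache file unavailable")  # stand-in failure
except Exception as e:
    # exc_info=True appends the formatted traceback to the log output.
    logging.warning(e, exc_info=True)
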
Project: upstox-python    Author: upstox
def get_instrument_by_symbol(self, exchange, symbol):
        # get instrument given exchange and symbol
        global master_contracts_by_symbol

        exchange = exchange.lower()
        symbol = symbol.lower()
        # check if master contract exists
        if exchange not in master_contracts_by_symbol:
            logging.warning("Cannot find exchange [%s] in master contract. "
                            "Please ensure you have called get_master_contract function first" % exchange)
            return None

        master_contract = master_contracts_by_symbol[exchange]

        if symbol not in master_contract:
            logging.warning("Cannot find symbol [%s:%s] in master contract" % (exchange, symbol))
            return None

        return master_contract[symbol]
Project: upstox-python    Author: upstox
def search_instruments(self, exchange, symbol):
        # search instrument given exchange and symbol
        global master_contracts_by_token

        exchange = exchange.lower()
        symbol = symbol.lower()

        matches = []

        # check if master contract exists
        if exchange not in master_contracts_by_token:
            logging.warning(
                "Cannot find exchange [%s] in master contract. "
                "Please ensure you have called get_master_contract function first" % exchange)
            return None

        master_contract = master_contracts_by_token[exchange]

        for contract in master_contract:
            if symbol in master_contract[contract].symbol:
                matches.append(master_contract[contract])

        return matches
Project: upstox-python    Author: upstox
def get_instrument_by_token(self, exchange, token):
        # get instrument given exchange and token
        global master_contracts_by_token

        exchange = exchange.lower()

        # check if master contract exists
        if exchange not in master_contracts_by_token:
            logging.warning(
                "Cannot find exchange [%s] in master contract. "
                "Please ensure you have called get_master_contract function first" % exchange)
            return None

        master_contract = master_contracts_by_token[exchange]

        if token not in master_contract:
            logging.warning("Cannot find token [%s:%s] in master contracts" % (exchange, token))
            return None
        return master_contract[token]
Project: htsget    Author: jeromekelleher
def __retry(self, method, *args):
        completed = False
        num_retries = 0
        position_before = None
        try:
            # stdout does not support seek/tell, so we disable retry if this fails
            position_before = self.output.tell()
        except IOError:
            pass
        while not completed:
            try:
                method(*args)
                completed = True
            except exceptions.RetryableError as re:
                if position_before is not None and num_retries < self.max_retries:
                    num_retries += 1
                    sleep_time = self.retry_wait  # TODO exponential backoff
                    logging.warning(
                        "Error: '{}' occurred; sleeping {}s before retrying "
                        "(attempt={})".format(re, sleep_time, num_retries))
                    self.output.seek(position_before)
                    time.sleep(sleep_time)
                else:
                    raise re
Project: mbin    Author: fanglab
def launch():
    opts, h5_files, motifs_fn = __parseArgs()
    __initLog(opts)

    motifs           = np.loadtxt(motifs_fn, dtype="str", ndmin=1)
    motifs,not_found = find_motifs_in_control(opts, motifs)
    if len(not_found)>0:
        logging.warning("")
        logging.warning("  ******************** Important *********************")
        logging.warning("  Did not find %s motifs in %s:" % (len(not_found), opts.control_pkl_name))
        for nf in not_found:
            logging.warning("       %s" % nf)
        logging.warning("  These motif(s) will be removed from further analysis.")
        logging.warning("  These %s motifs will be kept:" % len(motifs))
        for m in motifs:
            logging.warning("       %s" % m)
        logging.warning("  ****************************************************")
        logging.warning("")
    else:
        logging.info("Found entries for all %s motifs in %s" % (len(motifs), opts.control_pkl_name))


    build_profiles(opts, h5_files, motifs, motifs_fn)

    print >> sys.stderr, "mBin methylation profiling has finished running. See log for details."
Project: scrapy_projects    Author: morefreeze
def check_url(self, url):
        """try to fetch url to judge whether the opener works

        :url: url to check
        :returns: {succ: True, lag: 10(ms)}
        """
        succ = False
        try:
            resp = self.opener.open(url, timeout=self.TIMEOUT)
            logging.debug(resp)
            if resp and resp.code == 200:
                succ = True
        except Exception as e:
            logging.warning('Check url(%s) through proxy(%s) error: %s' % (url, self.opener.handlers[0].proxies, e))
        # TODO: return lag
        return {'succ': succ}
Project: privcount    Author: privcount
def is_valid_client_version(self, uid, status=None):
        '''
        Check that the version of client is new enough that we want to use it.
        Warn and return False if it is not.
        '''
        cname = TallyServer.get_client_display_name(uid)
        cinfo = self.get_client_info(uid, status)
        cdetail = self.get_client_detail(uid, status)
        cversion = self.get_client_version(uid, status)

        # Reject DC versions 1.0.0 and 1.0.1, they didn't add noise
        client_type = self.get_client_type(uid, status)
        pc_version = self._get_client_item(uid, 'privcount_version',
                                                  status, None)
        pc_version_number, _, _ = pc_version.partition(' ')
        if client_type == 'DataCollector':
            if pc_version_number == '1.0.0' or pc_version_number == '1.0.1':
                logging.warning("Insecure Data Collector PrivCount version {}: {} {}"
                                .format(pc_version_number, cname, cinfo))
                logging.debug("Insecure Data Collector PrivCount version {}: {} detail {} {}"
                              .format(pc_version_number, cname, cdetail,
                                      cversion))
                return False

        return True
Project: privcount    Author: privcount
def is_ip_address_valid(field_name, fields, event_desc,
                        is_mandatory=False):
    '''
    Check that fields[field_name] passes is_field_valid(), and is a valid
    IPv4 or IPv6 address.

    Return values are like is_string_valid.
    '''
    if not is_field_valid(field_name, fields, event_desc,
                          is_mandatory=is_mandatory):
        return False
    if field_name not in fields:
        # valid optional field, keep on processing
        return True
    field_value = validate_ip_address(fields[field_name])
    if field_value is None:
        # not an IP address
        logging.warning("Ignored {} '{}', must be an IP address {}"
                        .format(field_name, fields[field_name], event_desc))
        return False
    # it is valid and we want to keep on processing
    return True
Project: privcount    Author: privcount
def exact_match_prepare_collection(exact_collection):
    '''
    Prepare a hashable object collection for efficient exact matching.
    If the objects in the collection are strings, lowercases them.
    Returns an object that can be passed to exact_match().
    This object must be treated as opaque and read-only.
    '''
    assert exact_collection is not None
    # Set matching uses a hash table, so it's more efficient
    exact_collection = [lower_if_hasattr(obj) for obj in exact_collection]
    exact_set = frozenset(exact_collection)
    # Log a message if there were any duplicates
    # Finding each duplicate takes a lot longer
    if len(exact_collection) != len(exact_set):
        dups = [obj for obj in exact_set if exact_collection.count(obj) > 1]
        dups_summary = summarise_list(sorted(dups), 50)
        logging.warning("Removing {} duplicates from the collection"
                        .format(dups_summary))
    # the encoded json measures transmission size, not RAM size
    logging.info("Exact match prepared {} items ({})"
                 .format(len(exact_set),
                         format_bytes(len(json_serialise(list(exact_set))))))
    return exact_set
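
A hypothetical call (exact_match() itself is not part of this excerpt); with one duplicate after lowercasing, the function logs the removal warning and returns a two-element frozenset:

prepared = exact_match_prepare_collection(['Example.com', 'example.com', 'other.net'])
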
Project: privcount    Author: privcount
def suffix_reverse_match_uniquify_collection(suffix_obj, separator=""):
    '''
    Check if suffix_obj contains any strings that are a suffix of any other
    strings, log a warning-level message, and remove them.

    If specified, the separator is also required before the suffix.
    For example, domain suffixes use "." as a separator between components.
    '''
    # Find longer suffixes that match shorter suffixes in the list
    longer_suffix_matches = []
    for s in suffix_obj:
        # this doesn't match s itself, only longer duplicates
        if suffix_reverse_match(suffix_obj, reverse_string(s), separator):
            # don't modify the list while iterating it
            longer_suffix_matches.append(s)

    # Removing the longer suffixes is safe, because the list remains in-order
    logging.warning("Removing {} duplicate longer suffixes from the collection"
                    .format(len(longer_suffix_matches)))
    for s in longer_suffix_matches:
        # if there are multiple duplicates, they will all be removed
        suffix_obj.remove(s)
Project: privcount    Author: privcount
def handshake_cookie_verify(b64_cookie):
        '''
        If b64_cookie matches the expected format for a base-64 encoded
        privcount cookie, return the decoded cookie.
        Otherwise, return False.
        Raises an exception if the cookie is not correctly padded base64.
        '''
        if len(b64_cookie) != PrivCountProtocol.COOKIE_B64_BYTES:
            logging.warning("Invalid cookie: wrong encoded length {} expected {}"
                            .format(len(b64_cookie),
                                    PrivCountProtocol.COOKIE_B64_BYTES))
            return False
        cookie = b64decode(b64_cookie)
        if len(cookie) != PrivCountProtocol.COOKIE_BYTES:
            logging.warning("Invalid cookie: wrong decoded length {} expected {}"
                            .format(len(cookie),
                                    PrivCountProtocol.COOKIE_BYTES))
            return False
        return cookie
Project: privcount    Author: privcount
def handshake_hmac_verify(b64_hmac, handshake_key, prefix, server_cookie,
                              client_cookie):
        '''
        If b64_hmac matches the expected format for a base-64 encoded
        privcount HMAC, and the HMAC matches the expected HMAC for
        handshake_key, prefix, and the cookies, return True.
        Otherwise, return False.
        Raises an exception if the HMAC is not correctly padded base64.
        '''
        hmac = PrivCountProtocol.handshake_hmac_decode(b64_hmac)
        if not hmac:
            logging.warning("Invalid hmac: wrong format")
            return False
        if not verify_hmac(hmac,
                           handshake_key,
                           prefix,
                           server_cookie +
                           client_cookie):
            logging.warning("Invalid hmac: verification failed")
            return False
        return True
Project: privcount    Author: privcount
def handshake1_verify(handshake):
        '''
        If handshake matches the expected format for HANDSHAKE1,
        return the server cookie.
        Otherwise, return False.
        Raises an exception if the server cookie is not correctly padded
        base64.
        '''
        if not PrivCountProtocol.handshake_prefix_verify(
                                     handshake,
                                     PrivCountProtocol.HANDSHAKE1,
                                     PrivCountProtocol.ROLE_SERVER):
            return False
        parts = handshake.strip().split()
        if len(parts) != PrivCountProtocol.HANDSHAKE1_PARTS:
            logging.warning("Invalid handshake: wrong number of parts {} expected {}"
                            .format(len(parts),
                                    PrivCountProtocol.HANDSHAKE1_PARTS))
            return False
        server_cookie = PrivCountProtocol.handshake_cookie_verify(
            parts[PrivCountProtocol.HANDSHAKE_PREFIX_PARTS])
        return server_cookie
Project: privcount    Author: privcount
def handshake4_verify(handshake):
        '''
        If handshake matches the expected format for HANDSHAKE4,
        and the message is SUCCESS, return True.
        Otherwise, return False.
        '''
        if not PrivCountProtocol.handshake_prefix_verify(
                                     handshake,
                                     PrivCountProtocol.HANDSHAKE4,
                                     PrivCountProtocol.ROLE_CLIENT):
            return False
        parts = handshake.strip().split()
        if len(parts) != PrivCountProtocol.HANDSHAKE4_PARTS:
            logging.warning("Invalid handshake: wrong number of parts {} expected {}"
                            .format(len(parts),
                                    PrivCountProtocol.HANDSHAKE4_PARTS))
            return False
        message = parts[PrivCountProtocol.HANDSHAKE_PREFIX_PARTS]
        if message != PrivCountProtocol.HANDSHAKE_SUCCESS:
            logging.warning("Invalid handshake: message was not SUCCESS")
            return False
        return True
Project: privcount    Author: privcount
def decodeNonce(encoded_str, min_len, max_len):
        '''
        Decode and check a received nonce.
        Returns the nonce if valid, or None if not valid.
        '''
        assert min_len >= 0
        assert max_len >= min_len
        decoded_bytes = TorControlProtocol.decodeControllerString(encoded_str)
        if len(decoded_bytes) < min_len:
            logging.warning("Received nonce was {} bytes, wanted at least {} bytes"
                            .format(len(decoded_bytes), min_len))
            return None
        if len(decoded_bytes) > max_len:
            logging.warning("Received nonce was {} bytes, wanted no more than {} bytes"
                            .format(len(decoded_bytes), max_len))
            return None
        return decoded_bytes

    # Use aliases for documentation purposes, and to match decoding functions
Project: privcount    Author: privcount
def getConfiguredValue(self, get_function_name, value_name,
                           default=None):
        '''
        When we need a value, call factory.get_function_name to get it.
        Log a message containing value_name if this fails, and return default.
        '''
        try:
            # Equivalent to self.factory.get_function_name()
            return getattr(self.factory, get_function_name)()
        except AttributeError as e:
            logging.warning("Connection with {}: tried to get {} via {}, but factory raised {}, returning {}"
                            .format(transport_info(self.transport),
                                    value_name, get_function_name,
                                    e, default))
            return default

    # works for both configured and discovered values
Project: privcount    Author: privcount
def writeConfiguredCookieFile(self, cookie_string = None):
        '''
        Write a random 32-byte value to the configured cookie file.
        If cookie_string is not None, use that value.
        Return the value written to the file, or None if there is no cookie
        file, or if writing the file fails.
        '''
        cookie_file = self.getConfiguredCookieFile()
        if cookie_file is not None:
            if cookie_string is None:
                cookie_string = urandom(TorControlProtocol.SAFECOOKIE_LENGTH)
            try:
                with open(cookie_file, 'wb') as f:  # cookie_string is raw bytes
                    f.write(cookie_string)
            except IOError as e:
                logging.warning("Disabling SAFECOOKIE authentication, writing cookie file '{}' failed with error: {}"
                                .format(cookie_file, e))
                return None
            # sanity check: this will fail in write-only environments
            assert cookie_string == TorControlProtocol.readCookieFile(
                cookie_file)
            return cookie_string
        else:
            return None
Project: privcount    Author: privcount
def check_line_length(self, line, is_line_received, is_length_exceeded):
        '''
        Warns on over-length lines, based on whether the line is received,
        and whether the line was delivered via lineLengthExceeded or not
        (sometimes, twisted's lineLengthExceeded only delivers a partial line,
        https://twistedmatrix.com/trac/ticket/6558
        and it had issues counting end of line characters
        https://twistedmatrix.com/trac/ticket/6536
        )
        Terminates the reactor if the line is over-length.
        '''
        is_length_exceeded = is_length_exceeded or len(line) > self.MAX_LENGTH
        is_unsafe_length = is_length_exceeded or len(line) > self.get_warn_length(is_line_received)
        # if we are over the safe length, warn
        if is_unsafe_length:
            logging.warning("{} line of length {} exceeded {} of {}, {} connection to {}"
                            .format("Received" if is_line_received else "Generated",
                                    len(line),
                                    "MAX_LENGTH" if is_length_exceeded else "safe length",
                                    self.get_warn_length(is_line_received),
                                    "dropping" if is_length_exceeded and is_line_received else "keeping",
                                    transport_info(self.transport)))
        # if we send or receive an overlength line, fail
        if is_length_exceeded:
            stop_reactor(1)
Project: privcount    Author: privcount
def sendLine(self, line):
        '''
        overrides twisted function
        '''
        logging.debug("Sending line '{}' to {}"
                      .format(line, transport_info(self.transport)))
        self.check_line_length(line, False, False)
        # make sure we don't issue a SETCONF when we're not supposed to
        if line.startswith("SETCONF"):
            use_setconf = self.getConfiguredValue('get_use_setconf',
                                                  'use SETCONF',
                                                  default=True)
            if not use_setconf:
                logging.warning("Connection with {}: protocol tried to use SETCONF when use_setconf was False: '{}'"
                            .format(transport_info(self.transport), line))
                self.quit()
                return
        return LineOnlyReceiver.sendLine(self, line)
Project: privcount    Author: privcount
def handleUnexpectedLine(self, line):
        '''
        Log any unexpected responses at an appropriate level.
        Quit on error responses.
        '''
        if line == "250 OK":
            logging.debug("Connection with {}: ok response: '{}'"
                          .format(transport_info(self.transport), line))
        elif line.startswith("650 PRIVCOUNT_"):
            logging.warning("Connection with {}: unexpected event: '{}'"
                            .format(transport_info(self.transport), line))
        elif line.startswith("5"):
            logging.warning("Connection with {}: unexpected response: '{}'"
                            .format(transport_info(self.transport), line))
            self.quit()
        elif line.startswith("2"):
            logging.info("Connection with {}: ok response: '{}'"
                         .format(transport_info(self.transport), line))
        else:
            logging.warning("Connection with {}: unexpected response: '{}'"
                            .format(transport_info(self.transport), line))
            self.quit()
Project: privcount    Author: privcount
def check_dc_threshold(dc_threshold, description="threshold"):
    '''
    Check that dc_threshold is a valid dc threshold.
    DC thresholds must be positive non-zero, and less than or equal to
    MAX_DC_COUNT.
    Returns True if the dc threshold is valid.
    Logs a specific warning using description and returns False if it is not.
    '''
    if dc_threshold <= 0:
        logging.warning("Data collector {} must be at least 1, was {}"
                        .format(description, dc_threshold))
        return False
    if dc_threshold > MAX_DC_COUNT:
        logging.warning("Data collector {} can be at most {}, was {}"
                        .format(description, MAX_DC_COUNT, dc_threshold))
        return False
    return True
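
A hypothetical usage, assuming MAX_DC_COUNT is at least 3 in the enclosing module:

check_dc_threshold(0)                     # warns "must be at least 1", returns False
check_dc_threshold(3, "share threshold")  # within range, returns True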