Python pydoc module: locate() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use pydoc.locate().
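
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what pydoc.locate() does: it resolves a dotted path to the module, class, function, or method it names, and returns None when nothing matches.

from pydoc import locate

# Resolve a module, a class and a method by their dotted paths.
json_module = locate('json')                      # the json module
ordered_dict = locate('collections.OrderedDict')  # the OrderedDict class
str_upper = locate('str.upper')                   # the str.upper method

# An unresolvable path yields None instead of raising.
missing = locate('no.such.module.Thing')

print(json_module, ordered_dict, str_upper, missing)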

Project: seq2seq | Author: google
def _create_decoder(self, encoder_output, features, _labels):
    attention_class = locate(self.params["attention.class"]) or \
      getattr(decoders.attention, self.params["attention.class"])
    attention_layer = attention_class(
        params=self.params["attention.params"], mode=self.mode)

    # If the input sequence is reversed we also need to reverse
    # the attention scores.
    reverse_scores_lengths = None
    if self.params["source.reverse"]:
      reverse_scores_lengths = features["source_len"]
      if self.use_beam_search:
        reverse_scores_lengths = tf.tile(
            input=reverse_scores_lengths,
            multiples=[self.params["inference.beam_search.beam_width"]])

    return self.decoder_class(
        params=self.params["decoder.params"],
        mode=self.mode,
        vocab_size=self.target_vocab_info.total_size,
        attention_values=encoder_output.attention_values,
        attention_values_length=encoder_output.attention_values_length,
        attention_keys=encoder_output.outputs,
        attention_fn=attention_layer,
        reverse_scores_lengths=reverse_scores_lengths)
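
Several snippets in this list share the resolution pattern used above: try locate() on a fully qualified class path first, then fall back to getattr() on a known module when only a bare class name was configured. Below is a minimal sketch of that pattern, using stdlib stand-ins rather than the project's own modules.

from pydoc import locate
import collections  # stands in for a project module such as decoders.attention

def resolve_class(class_spec, default_module=collections):
    # A fully qualified path ('collections.OrderedDict') resolves via locate();
    # a bare name ('OrderedDict') falls back to an attribute lookup.
    return locate(class_spec) or getattr(default_module, class_spec)

print(resolve_class('collections.OrderedDict'))
print(resolve_class('OrderedDict'))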
Project: seq2seq | Author: google
def __init__(self, params):
    super(DecodeText, self).__init__(params)
    self._unk_mapping = None
    self._unk_replace_fn = None

    if self.params["unk_mapping"] is not None:
      self._unk_mapping = _get_unk_mapping(self.params["unk_mapping"])
    if self.params["unk_replace"]:
      self._unk_replace_fn = functools.partial(
          _unk_replace, mapping=self._unk_mapping)

    self._postproc_fn = None
    if self.params["postproc_fn"]:
      self._postproc_fn = locate(self.params["postproc_fn"])
      if self._postproc_fn is None:
        raise ValueError("postproc_fn not found: {}".format(
            self.params["postproc_fn"]))
Project: py-enarksh | Author: SetBased
def _read_xml_nodes(self, xml):
        """
        :param xml.etree.ElementTree.Element xml:
        """
        for element in list(xml):
            module = locate('enarksh.xml_reader.node')
            node = module.create_node(element.tag, self)
            node.read_xml(element)
            name = node.name

            # Check for child nodes with duplicate names.
            if name in self._child_nodes:
                raise Exception("Duplicate child node '{0!s}'.".format(name))

            # Add child node to map of child nodes.
            self._child_nodes[name] = node

    # ------------------------------------------------------------------------------------------------------------------
Project: KagglePlanetPytorch | Author: Mctigger
def predict_kfold(model_name, pre_transforms=[]):
    model = locate(model_name + '.generate_model')()
    random_state = locate(model_name + '.random_state')
    print('Random state: {}'.format(random_state))

    labels_df = labels.get_labels_df()
    kf = sklearn.model_selection.KFold(n_splits=5, shuffle=True, random_state=random_state)
    split = kf.split(labels_df)

    for i, (train_idx, val_idx) in enumerate(split):
        split_name = model_name + '-split_' + str(i)
        best_epoch = util.find_epoch_val(split_name)
        print('Using epoch {} for predictions'.format(best_epoch))
        epoch_name = split_name + '-epoch_' + str(best_epoch)
        train = labels_df.ix[train_idx]
        val = labels_df.ix[val_idx]
        state = torch.load(os.path.join(paths.models, split_name, epoch_name))

        predict_model(model, state, train, val, output_file=split_name, pre_transforms=pre_transforms)
Project: pentagon | Author: reactiveops
def get_component_class(component_path):
    """ Construct Class path from component input """
    component_path_list = component_path.split(".")
    if len(component_path_list) > 1:
        component_name = ".".join(component_path.split(".")[0:-1])
        component_class_name = component_path.split(".")[-1].title()
    else:
        component_name = component_path
        component_class_name = component_path.title()

    logging.debug('Seeking pentagon.component.{}.{}'.format(component_name, component_class_name))

    # Find Class if it exists
    component_class = locate("pentagon.component.{}.{}".format(component_name, component_class_name))
    if component_class is None:
        logging.debug('pentagon.component.{}.{} not found'.format(component_name, component_class_name))
        logging.debug('Seeking pentagon.{}.{}'.format(component_name, component_class_name))
        component_class = locate("pentagon_{}.{}".format(component_name, component_class_name))

    logging.debug("Found {}".format(component_class))

    return component_class
Project: xmusic-crawler | Author: rockers7414
def do_reload(self, arg):
        print("API loading...")
        with open("xmusic-api.json", "r") as f:
            apis = json.load(f)
            for api in apis:
                method = api["method"].replace(" ", "_").lower()
                parser = argparse.ArgumentParser(
                    prog=method,
                    description=api["description"])
                for param in api["parameters"]:
                    parser.add_argument(
                        "--" + param["name"],
                        type=locate(param["type"]),
                        default=param["default"] if "default" in param
                        else None,
                        choices=param["choices"] if "choices" in param
                        else None,
                        required=param["required"],
                        help=param["description"])
                setattr(XMusicShell, "parser_" + method, parser)
                setattr(XMusicShell, "do_" + method, self._process)
                setattr(XMusicShell, "help_" + method, parser.print_help)
Project: conv_seq2seq | Author: tobyyouup
def _create_decoder(self, encoder_output, features, _labels):
    attention_class = locate(self.params["attention.class"]) or \
      getattr(decoders.attention, self.params["attention.class"])
    attention_layer = attention_class(
        params=self.params["attention.params"], mode=self.mode)

    # If the input sequence is reversed we also need to reverse
    # the attention scores.
    reverse_scores_lengths = None
    if self.params["source.reverse"]:
      reverse_scores_lengths = features["source_len"]
      if self.use_beam_search:
        reverse_scores_lengths = tf.tile(
            input=reverse_scores_lengths,
            multiples=[self.params["inference.beam_search.beam_width"]])

    return self.decoder_class(
        params=self.params["decoder.params"],
        mode=self.mode,
        vocab_size=self.target_vocab_info.total_size,
        attention_values=encoder_output.attention_values,
        attention_values_length=encoder_output.attention_values_length,
        attention_keys=encoder_output.outputs,
        attention_fn=attention_layer,
        reverse_scores_lengths=reverse_scores_lengths)
Project: conv_seq2seq | Author: tobyyouup
def __init__(self,
               params,
               mode,
               vocab_size,
               config,
               target_embedding,
               pos_embedding,
               start_tokens,
               name="conv_decoder_fairseq"):
    GraphModule.__init__(self, name)
    Configurable.__init__(self, params, mode)

    self.vocab_size = vocab_size
    self.config=config
    self.target_embedding=target_embedding 
    self.start_tokens=start_tokens
    self._combiner_fn = locate(self.params["position_embeddings.combiner_fn"])
    self.pos_embed = pos_embedding
    self.current_inputs = None
    self.initial_state = None
Project: conv_seq2seq | Author: tobyyouup
def __init__(self, params):
    super(DecodeText, self).__init__(params)
    self._unk_mapping = None
    self._unk_replace_fn = None

    if self.params["unk_mapping"] is not None:
      self._unk_mapping = _get_unk_mapping(self.params["unk_mapping"])
    if self.params["unk_replace"]:
      self._unk_replace_fn = functools.partial(
          _unk_replace, mapping=self._unk_mapping)

    self._postproc_fn = None
    if self.params["postproc_fn"]:
      self._postproc_fn = locate(self.params["postproc_fn"])
      if self._postproc_fn is None:
        raise ValueError("postproc_fn not found: {}".format(
            self.params["postproc_fn"]))
Project: django-rest-framework-client | Author: qvantel
def lookup_by_objref(objref):
    """
    Imports an object by an ObjRef object.

    If the ObjRef object also has a module attribute, the function additionally
    attempts the import relative to that module when the absolute import fails.
    """
    obj = pydoc.locate(objref.name)
    if obj is None:
        if objref.module is None:
            raise ImportError('Unable to import "%s"' % (objref.name))
        path = '.'.join([objref.module, objref.name])
        obj = pydoc.locate(path)
        if obj is None:
            raise ImportError('Unable to import "%s" nor "%s"' % (objref.name, path))
    return obj
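
A hedged usage sketch for the function above; ObjRef here is only a stand-in namedtuple exposing the two attributes the function reads (name and module):

from collections import namedtuple

ObjRef = namedtuple('ObjRef', ['name', 'module'])

# locate('OrderedDict') fails, so the module-qualified path is tried next
# and resolves to the collections.OrderedDict class.
cls = lookup_by_objref(ObjRef(name='OrderedDict', module='collections'))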
Project: zippy | Author: securesystemslab
def test_builtin(self):
        for name in ('str', 'str.translate', 'builtins.str',
                     'builtins.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('notbuiltins', 'strrr', 'strr.translate',
                     'str.trrrranslate', 'builtins.strrr',
                     'builtins.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: oil | Author: oilshell
def test_builtin(self):
        for name in ('str', 'str.translate', '__builtin__.str',
                     '__builtin__.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('not__builtin__', 'strrr', 'strr.translate',
                     'str.trrrranslate', '__builtin__.strrr',
                     '__builtin__.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: python2-tracer | Author: extremecoders-re
def test_builtin(self):
        for name in ('str', 'str.translate', '__builtin__.str',
                     '__builtin__.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('not__builtin__', 'strrr', 'strr.translate',
                     'str.trrrranslate', '__builtin__.strrr',
                     '__builtin__.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: web_ctp | Author: molebot
def test_builtin(self):
        for name in ('str', 'str.translate', 'builtins.str',
                     'builtins.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('notbuiltins', 'strrr', 'strr.translate',
                     'str.trrrranslate', 'builtins.strrr',
                     'builtins.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: pefile.pypy | Author: cloudtracer
def test_builtin(self):
        for name in ('str', 'str.translate', '__builtin__.str',
                     '__builtin__.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('not__builtin__', 'strrr', 'strr.translate',
                     'str.trrrranslate', '__builtin__.strrr',
                     '__builtin__.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: automatic-summarization | Author: mozilla
def _create_decoder(self, encoder_output, features, _labels):
    attention_class = locate(self.params["attention.class"]) or \
      getattr(decoders.attention, self.params["attention.class"])
    attention_layer = attention_class(
        params=self.params["attention.params"], mode=self.mode)

    # If the input sequence is reversed we also need to reverse
    # the attention scores.
    reverse_scores_lengths = None
    if self.params["source.reverse"]:
      reverse_scores_lengths = features["source_len"]
      if self.use_beam_search:
        reverse_scores_lengths = tf.tile(
            input=reverse_scores_lengths,
            multiples=[self.params["inference.beam_search.beam_width"]])

    return self.decoder_class(
        params=self.params["decoder.params"],
        mode=self.mode,
        vocab_size=self.target_vocab_info.total_size,
        attention_values=encoder_output.attention_values,
        attention_values_length=encoder_output.attention_values_length,
        attention_keys=encoder_output.outputs,
        attention_fn=attention_layer,
        reverse_scores_lengths=reverse_scores_lengths)
Project: automatic-summarization | Author: mozilla
def __init__(self, params):
    super(DecodeText, self).__init__(params)
    self._unk_mapping = None
    self._unk_replace_fn = None

    if self.params["unk_mapping"] is not None:
      self._unk_mapping = _get_unk_mapping(self.params["unk_mapping"])
    if self.params["unk_replace"]:
      self._unk_replace_fn = functools.partial(
          _unk_replace, mapping=self._unk_mapping)

    self._postproc_fn = None
    if self.params["postproc_fn"]:
      self._postproc_fn = locate(self.params["postproc_fn"])
      if self._postproc_fn is None:
        raise ValueError("postproc_fn not found: {}".format(
            self.params["postproc_fn"]))
Project: ouroboros | Author: pybee
def test_builtin(self):
        for name in ('str', 'str.translate', 'builtins.str',
                     'builtins.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('notbuiltins', 'strrr', 'strr.translate',
                     'str.trrrranslate', 'builtins.strrr',
                     'builtins.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: ndk-python | Author: gittor
def test_builtin(self):
        for name in ('str', 'str.translate', '__builtin__.str',
                     '__builtin__.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('not__builtin__', 'strrr', 'strr.translate',
                     'str.trrrranslate', '__builtin__.strrr',
                     '__builtin__.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: napalm-logs | Author: napalm-automation
def cast(var, function):
    # If the function is a built-in function
    if locate(function) and hasattr(locate(function), '__call__'):
        try:
            return locate(function)(var)
        except ValueError:
            log.error('Unable to use function %s on value %s', function, var, exc_info=True)
    # If the function is a str method
    if hasattr(str, function) and\
            hasattr(getattr(str, function), '__call__'):
        return getattr(str, function)(var)
    glob = globals()
    # If the function is defined in this module
    if function in glob and hasattr(glob[function], '__call__'):
        return glob[function](var)
    # If none of the above, just return the original var
    return var
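
As a usage sketch (not part of the project source), the first branch above is the one that fires for built-in type names, because locate('int') or locate('float') returns the callable type object itself:

from pydoc import locate

print(locate('int')('42'))      # 42
print(locate('float')('3.14'))  # 3.14
print(cast('3.14', 'float'))    # 3.14, via the first branch of cast()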
Project: cs224n_prj | Author: lps-stanf
def _add_ini_file_section(self, config_parser, section_name, require_provided_section=True):
        sections_list = config_parser.sections()
        if section_name not in sections_list:
            if not require_provided_section:
                return
            raise RuntimeError('No required section in config file: "{0}"'.format(section_name))

        for option_key in config_parser.options(section_name):
            option_value = config_parser.get(section_name, option_key)
            option_key_list = option_key.split()
            if len(option_key_list) > 2:
                raise ValueError('Error in config, key is too long "{}"'.format(option_key))
            type = None
            if len(option_key_list) == 2:
                type = locate(option_key_list[0])
                option_key_list.pop(0)
            self.add_key_value(option_key_list[0], option_value, type)
Project: rill | Author: PermaData
def importable_class_name(klass, assert_valid=False):
    '''
    Create a string to use for locating the given class.

    Returns
    -------
    str
    '''
    import pydoc
    name = "{}.{}".format(klass.__module__, klass.__name__)
    if assert_valid:
        obj = pydoc.locate(name)
        if obj is None:
            raise ValueError("Could not locate {} at {}".format(klass, name))
        elif obj is not klass:
            raise ValueError("Object {} at {} is not "
                             "the same as {}".format(obj, name, klass))
    return name
Project: open-syllabus-project | Author: davidmcclure
def queue_page(model_import, job_import, worker_count, offset):

    """
    Spool a page of model instances for a job.

    Args:
        model_import (str)
        job_import (str)
        worker_count (int)
        offset (int)
    """

    # Import callables.
    model = locate(model_import)
    job = locate(job_import)

    for row in model.page_cursor(worker_count, offset):
        config.rq.enqueue(job, row.id)
Project: kbe_server | Author: xiaohaoppy
def test_builtin(self):
        for name in ('str', 'str.translate', 'builtins.str',
                     'builtins.str.translate'):
            # test low-level function
            self.assertIsNotNone(pydoc.locate(name))
            # test high-level function
            try:
                pydoc.render_doc(name)
            except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))

        for name in ('notbuiltins', 'strrr', 'strr.translate',
                     'str.trrrranslate', 'builtins.strrr',
                     'builtins.str.trrranslate'):
            self.assertIsNone(pydoc.locate(name))
            self.assertRaises(ImportError, pydoc.render_doc, name)
Project: pypydispatcher | Author: scrapy
def __init__ (
        self, baseModules, destinationDirectory = ".",
        recursion = 1, exclusions = (),
        recursionStops = (),
        formatter = None
    ):
        self.destinationDirectory = os.path.abspath( destinationDirectory)
        self.exclusions = {}
        self.warnings = []
        self.baseSpecifiers = {}
        self.completed = {}
        self.recursionStops = {}
        self.recursion = recursion
        for stop in recursionStops:
            self.recursionStops[ stop ] = 1
        self.pending = []
        for exclusion in exclusions:
            try:
                self.exclusions[ exclusion ]= pydoc.locate ( exclusion)
            except pydoc.ErrorDuringImport, value:
                self.warn( """Unable to import the module %s which was specified as an exclusion module"""% (repr(exclusion)))
        self.formatter = formatter or DefaultFormatter()
        for base in baseModules:
            self.addBase( base )
Project: antares | Author: CONABIO
def handle(self, **options):
        path = options['path'][0]
        column = options['column'][0]
        model = options['model'][0]



        with fiona.open(path) as src:
            print json.dumps(src.schema, indent=4)
            print src.crs
            for feat in src:

                #print feat['geometry']['type']
                s = shape(feat['geometry'])
                if feat['geometry']['type'] == 'Polygon':
                    s = MultiPolygon([s])
                    print json.dumps(feat['geometry'])
                klass = locate('madmex.models.%s' % model)

                f = klass(name=feat['properties'][column], the_geom=GEOSGeometry(s.wkt))
                f.save()
Project: CAEML | Author: Renumics
def constructCaemlObj_fromCaemlDict(aDict: dict) -> caemlBaseObj:
    """Constructs an object of caeml.base from a dict if caeml knows how to construct it; otherwise aDict is returned."""
    if not 'caemlType' in aDict:
        raise ValueError('aDict must include a CAEMl type')
    aClassName = aDict.pop('caemlType')
    if not type(aClassName) is list:
        aClassName = [aClassName]
    logging.getLogger('system').debug('Building object of type' + aClassName[0])
    aClass = locate(aClassName[0])  # TODO manager autocomplete, TODO: manager
    if (not aClass):
        raise Exception('No ctor found for ' + aClassName[0])

    try:
        if 'name' in aDict:
            aObject = aClass(**aDict)  # TODO: maybe validate parent<-> child relationships here:
        else:
            aObject = aClass(**aDict)
        return aObject
    except Exception as e:
        raise Exception('Ctor of {} raised {}'.format(aClassName[0], str(e)))
Project: seq2seq | Author: google
def __init__(self, encoder_outputs, decoder_state_size, params, mode):
    super(InitialStateBridge, self).__init__(encoder_outputs,
                                             decoder_state_size, params, mode)

    if not hasattr(encoder_outputs, self.params["bridge_input"]):
      raise ValueError("Invalid bridge_input not in encoder outputs.")

    self._bridge_input = getattr(encoder_outputs, self.params["bridge_input"])
    self._activation_fn = locate(self.params["activation_fn"])
Project: seq2seq | Author: google
def __init__(self, params, mode, name="basic_seq2seq"):
    super(BasicSeq2Seq, self).__init__(params, mode, name)
    self.encoder_class = locate(self.params["encoder.class"])
    self.decoder_class = locate(self.params["decoder.class"])
Project: seq2seq | Author: google
def _create_bridge(self, encoder_outputs, decoder_state_size):
    """Creates the bridge to be used between encoder and decoder"""
    bridge_class = locate(self.params["bridge.class"]) or \
      getattr(bridges, self.params["bridge.class"])
    return bridge_class(
        encoder_outputs=encoder_outputs,
        decoder_state_size=decoder_state_size,
        params=self.params["bridge.params"],
        mode=self.mode)
Project: seq2seq | Author: google
def cell_from_spec(cell_classname, cell_params):
  """Create a RNN Cell instance from a JSON string.

  Args:
    cell_classname: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.

  Returns:
    A RNNCell instance.
  """

  cell_params = cell_params.copy()

  # Find the cell class
  cell_class = locate(cell_classname) or getattr(rnn_cell, cell_classname)

  # Make sure additional arguments are valid
  cell_args = set(inspect.getargspec(cell_class.__init__).args[1:])
  for key in cell_params.keys():
    if key not in cell_args:
      raise ValueError(
          """{} is not a valid argument for {} class. Available arguments
          are: {}""".format(key, cell_class.__name__, cell_args))

  # Create cell
  return cell_class(**cell_params)
Project: seq2seq | Author: google
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode):
  """Loads model from a configuration file"""
  with gfile.GFile(config_path) as config_file:
    config = yaml.load(config_file)
  model_cls = locate(config["model"]) or getattr(models, config["model"])
  model_params = config["model_params"]
  if hparam_overrides:
    model_params.update(hparam_overrides)
  # Change the max decode length to make the test run faster
  model_params["decoder.params"]["max_decode_length"] = 5
  model_params["vocab_source"] = vocab_file
  model_params["vocab_target"] = vocab_file
  return model_cls(params=model_params, mode=mode)
Project: seq2seq | Author: google
def __init__(self, params, name):
    # We don't call the super constructor on purpose
    #pylint: disable=W0231
    """Initializer"""
    Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.EVAL)
    self._name = name
    self._eos_token = self.params["eos_token"]
    self._sos_token = self.params["sos_token"]
    self._separator = self.params["separator"]
    self._postproc_fn = None
    if self.params["postproc_fn"]:
      self._postproc_fn = locate(self.params["postproc_fn"])
      if self._postproc_fn is None:
        raise ValueError("postproc_fn not found: {}".format(
            self.params["postproc_fn"]))
Project: seq2seq | Author: google
def __init__(self, params, mode, name="conv_encoder"):
    super(ConvEncoder, self).__init__(params, mode, name)
    self._combiner_fn = locate(self.params["position_embeddings.combiner_fn"])
Project: seq2seq | Author: google
def __init__(self, params, mode, name="pooling_encoder"):
    super(PoolingEncoder, self).__init__(params, mode, name)
    self._pooling_fn = locate(self.params["pooling_fn"])
    self._combiner_fn = locate(self.params["position_embeddings.combiner_fn"])
Project: uchroma | Author: cyanogen
def generate():
        hw = locate('uchroma.server.Hardware')
        assert hw is not None

        hwdb = ""
        for hw_type in hw.Type:
            for model in hw.get_type(hw_type):
                hwdb += ('uchroma:usb:v%04Xp%04X*\n'
                         ' UCHROMA_DEVICE=%s\n\n'
                         % (model.vendor_id, model.product_id, model.type.name.lower()))

        return hwdb
Project: py-enarksh | Author: SetBased
def _read_xml_generator(self, xml):
        """
        :param lxml.etree.Element xml:
        """
        module = locate('enarksh.xml_reader.node')
        node = module.create_node('CommandJob', self)
        node.read_xml(xml)

        # Add child node to map of child nodes.
        self._generator = node

    # ------------------------------------------------------------------------------------------------------------------
Project: py-enarksh | Author: SetBased
def _read_xml_worker(self, xml):
        """
        :param lxml.etree.Element xml:
        """
        module = locate('enarksh.xml_reader.node')
        node = module.create_node('DynamicOuterWorker', self)
        node.read_xml(xml)

        # Add child node to map of child nodes.
        self._worker = node

    # ------------------------------------------------------------------------------------------------------------------
Project: pysploit | Author: spencerdodd
def load_exploit(self, exploit_to_load):
        """Loads a given exploit to the active session

        Checking for exploit validity happens in this method. The check is a
        dynamic module load from the exploits module dir. If we don't get an
        error, then the exploit exists. If we get an error, the exploit was
        not entered correctly. Accession-by-title.
        """
        #try:
        exploit_module = locate("exploits." + exploit_to_load.strip() + "." + exploit_to_load.strip())
        exploit_instance = exploit_module()
        self.active_session.set_exploit(exploit_instance)
        #except ModuleNotFoundError as mnfe:
        #   print ("{}\n\t[*] Entered: {}".format(INVALID_EXPLOIT_ERROR, exploit_module_name))
Project: napalm-base | Author: napalm-automation
def raise_exception(result):
    exc = locate(result["exception"])
    if exc:
        raise exc(*result.get("args", []), **result.get("kwargs", {}))
    else:
        raise TypeError("Couldn't resolve exception {}".format(result["exception"]))
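
A self-contained sketch of the same idea (the dotted path and arguments here are illustrative, not from napalm-base): an exception class serialized as a dotted path can be resolved with locate() and re-raised with its original arguments.

from pydoc import locate

result = {"exception": "builtins.KeyError", "args": ["missing-key"], "kwargs": {}}
exc = locate(result["exception"])  # <class 'KeyError'>
if exc:
    raise exc(*result.get("args", []), **result.get("kwargs", {}))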
Project: third_person_im | Author: bstadie
def locate_with_hint(class_path, prefix_hints=[]):
    module_or_class = locate(class_path)
    if module_or_class is None:
        # for hint in iscanr(lambda x, y: x + "." + y, prefix_hints):
        #     module_or_class = locate(hint + "." + class_path)
        #     if module_or_class:
        #         break
        hint = ".".join(prefix_hints)
        module_or_class = locate(hint + "." + class_path)
    return module_or_class
Project: conv_seq2seq | Author: tobyyouup
def __init__(self, encoder_outputs, decoder_state_size, params, mode):
    super(InitialStateBridge, self).__init__(encoder_outputs,
                                             decoder_state_size, params, mode)

    if not hasattr(encoder_outputs, self.params["bridge_input"]):
      raise ValueError("Invalid bridge_input not in encoder outputs.")

    self._bridge_input = getattr(encoder_outputs, self.params["bridge_input"])
    self._activation_fn = locate(self.params["activation_fn"])
Project: conv_seq2seq | Author: tobyyouup
def __init__(self, params, mode, name="basic_seq2seq"):
    super(BasicSeq2Seq, self).__init__(params, mode, name)
    self.encoder_class = locate(self.params["encoder.class"])
    self.decoder_class = locate(self.params["decoder.class"])
Project: conv_seq2seq | Author: tobyyouup
def _create_bridge(self, encoder_outputs, decoder_state_size):
    """Creates the bridge to be used between encoder and decoder"""
    bridge_class = locate(self.params["bridge.class"]) or \
      getattr(bridges, self.params["bridge.class"])
    return bridge_class(
        encoder_outputs=encoder_outputs,
        decoder_state_size=decoder_state_size,
        params=self.params["bridge.params"],
        mode=self.mode)
Project: conv_seq2seq | Author: tobyyouup
def __init__(self, params, mode, name="conv_seq2seq"):
    super(ConvSeq2Seq, self).__init__(params, mode, name)
    self.encoder_class = locate(self.params["encoder.class"])
    self.decoder_class = locate(self.params["decoder.class"])
Project: conv_seq2seq | Author: tobyyouup
def cell_from_spec(cell_classname, cell_params):
  """Create a RNN Cell instance from a JSON string.

  Args:
    cell_classname: Name of the cell class, e.g. "BasicLSTMCell".
    cell_params: A dictionary of parameters to pass to the cell constructor.

  Returns:
    A RNNCell instance.
  """

  cell_params = cell_params.copy()

  # Find the cell class
  cell_class = locate(cell_classname) or getattr(rnn_cell, cell_classname)

  # Make sure additional arguments are valid
  cell_args = set(inspect.getargspec(cell_class.__init__).args[1:])
  for key in cell_params.keys():
    if key not in cell_args:
      raise ValueError(
          """{} is not a valid argument for {} class. Available arguments
          are: {}""".format(key, cell_class.__name__, cell_args))

  # Create cell
  return cell_class(**cell_params)
Project: conv_seq2seq | Author: tobyyouup
def _create_from_dict(dict_, default_module, *args, **kwargs):
  """Creates a configurable class from a dictionary. The dictionary must have
  "class" and "params" properties. The class can be either fully qualified, or
  it is looked up in the modules passed via `default_module`.
  """
  class_ = locate(dict_["class"]) or getattr(default_module, dict_["class"])
  params = {}
  if "params" in dict_:
    params = dict_["params"]
  instance = class_(params, *args, **kwargs)
  return instance
Project: conv_seq2seq | Author: tobyyouup
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode):
  """Loads model from a configuration file"""
  with gfile.GFile(config_path) as config_file:
    config = yaml.load(config_file)
  model_cls = locate(config["model"]) or getattr(models, config["model"])
  model_params = config["model_params"]
  if hparam_overrides:
    model_params.update(hparam_overrides)
  # Change the max decode length to make the test run faster
  model_params["decoder.params"]["max_decode_length"] = 5
  model_params["vocab_source"] = vocab_file
  model_params["vocab_target"] = vocab_file
  return model_cls(params=model_params, mode=mode)
Project: conv_seq2seq | Author: tobyyouup
def __init__(self, params, name):
    # We don't call the super constructor on purpose
    #pylint: disable=W0231
    """Initializer"""
    Configurable.__init__(self, params, tf.contrib.learn.ModeKeys.EVAL)
    self._name = name
    self._eos_token = self.params["eos_token"]
    self._sos_token = self.params["sos_token"]
    self._separator = self.params["separator"]
    self._postproc_fn = None
    if self.params["postproc_fn"]:
      self._postproc_fn = locate(self.params["postproc_fn"])
      if self._postproc_fn is None:
        raise ValueError("postproc_fn not found: {}".format(
            self.params["postproc_fn"]))
Project: conv_seq2seq | Author: tobyyouup
def __init__(self, params, mode, name="pooling_encoder"):
    super(PoolingEncoder, self).__init__(params, mode, name)
    self._pooling_fn = locate(self.params["pooling_fn"])
    self._combiner_fn = locate(self.params["position_embeddings.combiner_fn"])