Python keras.models module: model_from_yaml() example source code

We extracted the following 19 code examples from open-source Python projects to illustrate how to use keras.models.model_from_yaml().
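
Before the project excerpts, a minimal sketch of the round trip (the two-layer architecture is purely illustrative): model.to_yaml() serializes only the architecture, and model_from_yaml() rebuilds a fresh, uncompiled model from that string; weights are handled separately via save_weights()/load_weights().

from keras.models import Sequential, model_from_yaml
from keras.layers import Dense, Activation

# an illustrative architecture
model = Sequential()
model.add(Dense(64, input_shape=(100,)))
model.add(Activation('relu'))

# architecture -> YAML string -> fresh, uncompiled model
yaml_string = model.to_yaml()
restored = model_from_yaml(yaml_string)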

Project: EMNIST    Author: Coopss    | Project source | File source
def load_model(bin_dir):
    ''' Load model from .yaml and the weights from .h5

        Arguments:
            bin_dir: The directory of the bin (normally bin/)

        Returns:
            Loaded model from file
    '''

    # load YAML and create model
    with open('%s/model.yaml' % bin_dir, 'r') as yaml_file:
        loaded_model_yaml = yaml_file.read()
    model = model_from_yaml(loaded_model_yaml)

    # load weights into new model
    model.load_weights('%s/model.h5' % bin_dir)
    return model
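
The save side is not shown in this project; a plausible counterpart that would have produced the bin/model.yaml and bin/model.h5 pair (the save_model name is hypothetical):

def save_model(model, bin_dir):
    ''' Save the architecture to .yaml and the weights to .h5 '''
    with open('%s/model.yaml' % bin_dir, 'w') as yaml_file:
        yaml_file.write(model.to_yaml())         # architecture only
    model.save_weights('%s/model.h5' % bin_dir)  # weights only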
Project: pyannote-audio    Author: pyannote    | Project source | File source
def from_disk(cls, architecture, weights):
        """Load pre-trained sequence embedding from disk

        Parameters
        ----------
        architecture : str
            Path to architecture file (e.g. created by `to_disk` method)
        weights : str
            Path to pre-trained weight file (e.g. created by `to_disk` method)

        Returns
        -------
        sequence_embedding : SequenceEmbedding
            Pre-trained sequence embedding model.
        """
        self = cls()

        with open(architecture, 'r') as fp:
            yaml_string = fp.read()
        self.embedding_ = model_from_yaml(
            yaml_string, custom_objects=CUSTOM_OBJECTS)
        self.embedding_.load_weights(weights)
        return self
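
model_from_yaml can only reconstruct layers Keras knows about; anything custom must be supplied through custom_objects. A toy sketch of what a mapping like CUSTOM_OBJECTS might contain (ScaledDense is hypothetical, standing in for pyannote's real custom classes):

from keras.layers import Dense

class ScaledDense(Dense):
    """Hypothetical stand-in for pyannote's real custom layers."""
    pass

# maps the class names stored in the YAML back to Python classes
CUSTOM_OBJECTS = {'ScaledDense': ScaledDense}
# usage: model_from_yaml(yaml_string, custom_objects=CUSTOM_OBJECTS)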
Project: deeplift    Author: kundajelab    | Project source | File source
def load_keras_model(weights, yaml=None, json=None,
                     normalise_conv_for_one_hot_encoded_input=False,
                     axis_of_normalisation=None,
                     name_of_conv_layer_to_normalise=None): 
    if (normalise_conv_for_one_hot_encoded_input):
        assert axis_of_normalisation is not None,\
         "specify axis of normalisation for normalising one-hot encoded input"
    assert yaml is not None or json is not None,\
     "either yaml or json must be specified"
    assert yaml is None or json is None,\
     "only one of yaml or json must be specified"
    if (yaml is not None):
        from keras.models import model_from_yaml 
        model = model_from_yaml(open(yaml).read()) 
    else:
        from keras.models import model_from_json 
        model = model_from_json(open(json).read()) 
    model.load_weights(weights) 
    if (normalise_conv_for_one_hot_encoded_input):
        mean_normalise_first_conv_layer_weights(
         model,
         axis_of_normalisation=axis_of_normalisation,
         name_of_conv_layer_to_normalise=name_of_conv_layer_to_normalise)
    return model
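
A hypothetical call site for the helper above (the file names are assumptions):

model = load_keras_model('weights.h5', yaml='architecture.yaml')
# or, from a JSON architecture instead:
# model = load_keras_model('weights.h5', json='architecture.json')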
Project: TextClassification    Author: mosu027    | Project source | File source
def lstm_predict(string):
    print('loading model......')
    with open(yml_path, 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    print('loading weights......')
    model.load_weights(h5_path)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    data = input_transform(string)
    data = data.reshape(1, -1)
    # print(data)
    result = model.predict_classes(data)
    # if result[0][0] == 1:
    #     print(string, ' positive')
    # else:
    #     print(string, ' negative')
    print(result)
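
Note that yaml.load(f) only yields a string here because, in this project's save path, the architecture string was apparently itself dumped as a YAML document. A sketch of that pairing (the save side is assumed, not shown in the excerpt):

import yaml

# save side (assumed): to_yaml() returns a YAML string; dumping it again
# stores a YAML document whose single payload is that string
with open(yml_path, 'w') as f:
    f.write(yaml.dump(model.to_yaml(), default_flow_style=True))

# load side: yaml.load() unwraps the document back to the original string,
# which model_from_yaml() can parse
with open(yml_path, 'r') as f:
    yaml_string = yaml.load(f)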
Project: patriots    Author: wdxtub    | Project source | File source
def predict_arr(arr):
  dict = loaddict()

  probas = []
  with open(modeldir + '/lstm.yml', 'r') as f:
    yaml_string = yaml.load(f)
  model = model_from_yaml(yaml_string)
  model.load_weights(modeldir + '/lstm.h5')
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

  for s in arr:
    textarr = list(jieba.cut(s))
    textvec = []
    for item in textarr:
      # keep only words present in the dictionary; skip out-of-vocabulary tokens
      if item in dict['id']:
        textvec.append(dict['id'][item])
    textvec = pd.Series(textvec)
    textvec = sequence.pad_sequences([textvec], maxlen=maxlen)

    proba = model.predict_proba(textvec, verbose=0)
    probas.append(proba[0][0])

  return probas
Project: emu    Author: mlosch    | Project source | File source
def _load_model_config(model_cfg, model_weights):
        if type(model_cfg) == str:
            if not os.path.exists(model_cfg):
                try:
                    class_ = getattr(applications, model_cfg)
                    return class_(weights=model_weights)
                except AttributeError:
                    available_mdls = [attr for attr in dir(applications) if callable(getattr(applications, attr))]
                    raise ValueError('Could not load pretrained model with key {}. '
                                     'Available models: {}'.format(model_cfg, ', '.join(available_mdls)))

            with open(model_cfg, 'r') as fileh:
                config_str = fileh.read()

            try:
                return model_from_json(config_str)
            except ValueError:
                pass

            try:
                return model_from_yaml(config_str)
            except ValueError:
                pass

            raise ValueError('Could not load model from configuration file {}. '
                             'Make sure the path is correct and the file format is yaml or json.'.format(model_cfg))
        elif type(model_cfg) == dict:
            return Model.from_config(model_cfg)
        elif type(model_cfg) == list:
            return Sequential.from_config(model_cfg)

        raise ValueError('Could not load model from configuration object of type {}.'.format(type(model_cfg)))
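
A hypothetical caller, illustrating the three shapes of model_cfg the helper accepts (names and paths are assumptions; under older Keras versions, Model.get_config() returned a dict while Sequential.get_config() returned a list):

m1 = _load_model_config('VGG16', 'imagenet')        # keras.applications model name
m2 = _load_model_config('model.yaml', None)         # path to a yaml/json config file
m3 = _load_model_config(model.get_config(), None)   # in-memory config object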
Project: DeepBach    Author: Ghadjeres    | Project source | File source
def load_model(model_name, yaml=True):
    """

    :rtype: object
    """
    if yaml:
        ext = '.yaml'
        model = model_from_yaml(open(model_name + ext).read())
    else:
        ext = '.json'
        model = model_from_json(open(model_name + ext).read())
    model.load_weights(model_name + '_weights.h5')
    # model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
    print("model " + model_name + " loaded")
    return model
Project: smiles-neural-network    Author: PMitura    | Project source | File source
def loadModel(modelName, layerPrefix=None):
    model = model_from_yaml(open(os.path.join(cc.cfg['persistence']['model_dir'], modelName+'.yml')).read())
    model.load_weights(os.path.join(cc.cfg['persistence']['model_dir'], modelName+'.h5'))

    if layerPrefix:
        for i in range(len(model.layers)):
            model.layers[i].name = layerPrefix + model.layers[i].name

    return model
Project: hyperas    Author: maxpumperla    | Project source | File source
def voting_model_from_yaml(yaml_list, voting='hard', weights=None):
    model_list = [model_from_yaml(yml) for yml in yaml_list]
    return VotingModel(model_list, voting, weights)
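
A hypothetical call site, serializing an already-trained ensemble first (trained_models is assumed):

yaml_list = [m.to_yaml() for m in trained_models]
ensemble = voting_model_from_yaml(yaml_list, voting='hard')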
Project: pyannote-audio    Author: pyannote    | Project source | File source
def on_train_begin(self, logs={}):
        current_embedding = self.extract_embedding(self.model)
        architecture = model_from_yaml(
            current_embedding.to_yaml(),
            custom_objects=CUSTOM_OBJECTS)
        current_weights = current_embedding.get_weights()

        from pyannote.audio.embedding.base import SequenceEmbedding
        sequence_embedding = SequenceEmbedding()

        sequence_embedding.embedding_ = architecture
        sequence_embedding.embedding_.set_weights(current_weights)
        setattr(self.generator, self.name, sequence_embedding)
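
The to_yaml()/model_from_yaml() round trip above is effectively an architecture clone. The same idea in two lines (a minimal sketch):

clone = model_from_yaml(model.to_yaml())  # fresh, uncompiled copy of the graph
clone.set_weights(model.get_weights())    # snapshot of the current weights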
Project: deep-learning-with-Keras    Author: decordoba    | Project source | File source
def load_model(location):
    with open(location + "/model.yaml", "r") as f:
        # passing the open file handle works because PyYAML, which
        # model_from_yaml uses internally, can parse from a stream
        model = model_from_yaml(f)
    model.load_weights(location + '/weights.h5')
    return model
Project: Vehicle-identification    Author: soloice    | Project source | File source
def load_model(self):
        model_path = '../model/'
        self.model = model_from_yaml(open(os.path.join(model_path, self.model_name + '_arch.yaml')).read())
        self.model.load_weights(os.path.join(model_path, self.model_name + '_weights.h5'))

        model_name = self.model_name + '_vision'
        self.vision_model = model_from_yaml(open(os.path.join(model_path, model_name + '_arch.yaml')).read())
        self.vision_model.load_weights(os.path.join(model_path, model_name + '_weights.h5'))
Project: TextClassification    Author: mosu027    | Project source | File source
def evaluate():
    print('loading model......')
    with open(yml_path, 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    print('loading weights......')
    model.load_weights(h5_path)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])

    testData = pd.read_csv(testPath, sep="\t")
    testX = list(testData["text"])
    y_pred = []
    print(testX[0])
    for i in range(len(testData)):
        data = input_transform(str(testX[i]))
        data = data.reshape(1, -1)
        pred_value = model.predict_classes(data)
        y_pred.append(pred_value[0])

    save_path = "doc/result.txt"
    desc = "basic lstm"
    result_str = result.printMultiResult(testData["score"], y_pred)
    result.saveResult(save_path, desc, result_str)
Project: patriots    Author: wdxtub    | Project source | File source
def predict(text):
  print('Loading Dict Data..')
  dict = loaddict()

  # segment the input text into words with jieba
  textarr = list(jieba.cut(text))

  textvec = []
  for item in textarr:
    # keep only words present in the dictionary; skip out-of-vocabulary tokens
    if item in dict['id']:
      textvec.append(dict['id'][item])

  textvec = pd.Series(textvec)  
  textvec = sequence.pad_sequences([textvec], maxlen=maxlen)

  # ---- 
  print('loading model......')
  with open(modeldir + '/lstm.yml', 'r') as f:
    yaml_string = yaml.load(f)
  model = model_from_yaml(yaml_string)

  print('loading weights......')
  model.load_weights(modeldir + '/lstm.h5')
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

  # predict the probability of the positive class
  #classes = model.predict_classes(textvec, verbose=1)
  proba = model.predict_proba(textvec, verbose=0)
  # optionally report the label using a 0.5 threshold:
  #for s in proba:
  #  if s[0] > 0.5:
  #    print('positive ' + str(s[0]) + ' ' + text)
  #  else:
  #    print('negative ' + str(s[0]) + ' ' + text)
  return proba[0][0]
Project: 10703_HW3    Author: ghliu    | Project source | File source
def load_model(model_config_path, model_weights_path=None):
    """Load a saved model.

    Parameters
    ----------
    model_config_path: str
      The path to the model configuration yaml file. We have provided
      you this file for problems 2 and 3.
    model_weights_path: str, optional
      If specified, will load keras weights from hdf5 file.

    Returns
    -------
    keras.models.Model
    """
    with open(model_config_path, 'r') as f:
        model = model_from_yaml(f.read())

    if model_weights_path is not None:
        model.load_weights(model_weights_path)

    model.summary()

    return model

# TODO
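
A hypothetical usage of the loader above (the file names are assumptions):

model = load_model('model_config.yaml', model_weights_path='model_weights.h5')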
Project: TextClassification    Author: mosu027    | Project source | File source
def train():
    print('Loading Data...')
    X, Y = loadfile()
    print('Tokenising...')
    pn = tokenizer(X)
    print('Generating Dict...')
    dict = generatedict(pn)
    print('Word to Index...')
    pn = word2index(pn, dict)
    print('Preparing data...')
    x, y, xt, yt, xa = getdata(pn, Y)
    print('Model Stage...')
    # train the LSTM model
    model = train_lstm(dict, x, y, xt, yt)
    #print('Save Test Result...')
    #saveresult(model, xt, pn)
    print("Done")

    return model


# def loaddict():
#   fr = open(modeldir + '/dict.data')
#   dict = pickle.load(fr)
#   return dict


# def classify(text):
#     dict = loaddict()
#
#     with open(modeldir + '/lstm.yml', 'r') as f:
#         yaml_string = yaml.load(f)
#     model = model_from_yaml(yaml_string)
#     model.load_weights(modeldir + '/lstm.h5')
#     model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
#
#     textvec = []
#     for item in text:
#         # keep only words present in the dictionary; skip out-of-vocabulary tokens
#         if item in dict['id']:
#             textvec.append(dict['id'][item])
#     textvec = pd.Series(textvec)
#     textvec = sequence.pad_sequences([textvec], maxlen=maxlen)
#     # predict
#     prob = model.predict(textvec, verbose=0)
#     proba = model.predict_proba(textvec, verbose=0)
#     print "The preidction is : ", prob
Project: patriots    Author: wdxtub    | Project source | File source
def batchtest(filepath):
  dict = loaddict()

  with open(modeldir + '/lstm.yml', 'r') as f:
    yaml_string = yaml.load(f)
  model = model_from_yaml(yaml_string)
  model.load_weights(modeldir + '/lstm.h5')
  model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

  # batch test: read the test file line by line
  # each line has the form "label,text"
  test_count = 0
  correct_count = 0
  if os.path.exists(filepath):
    f = open(filepath, 'r')
    try:
      lines = f.readlines()
      for line in lines:
        if len(line) <= 0:
          continue
        else:
          arr = line.split(',')
          label = arr[0]
          test_count += 1
          text = ",".join(arr[1:])
          textarr = list(jieba.cut(text))
          textvec = []
          for item in textarr:
            # keep only words present in the dictionary; skip out-of-vocabulary tokens
            if item in dict['id']:
              textvec.append(dict['id'][item])
          textvec = pd.Series(textvec)  
          textvec = sequence.pad_sequences([textvec], maxlen=maxlen)
          # predict
          proba = model.predict_proba(textvec, verbose=0)
          # count the prediction as correct if it matches the label
          for s in proba:
            if (s[0] > 0.5 and label == '1') or (s[0] <= 0.5 and label == '0'):
              correct_count += 1
              print('[' + str(test_count) + ']: ' + label + ' ' + str(s[0]) + ' ' + text[:-1])
            else:
              print('[' + str(test_count) + ']:[x] ' + label + ' ' + str(s[0]) + ' ' + text[:-1])
    finally:
      f.close()  # close the test file
  return correct_count, test_count

Project: derplearning    Author: John-Ellis    | Project source | File source
def __init__(self, log, model_path, weights_path):
        """
        Open the model
        """
        self.log = log
        self.model_path = model_path
        self.weights_path = weights_path

        '''
        self.source_size = (640, 480)
        self.crop_size = (640, 160)
        self.crop_x = 0
        self.crop_y = self.source_size[1] - self.crop_size[1]
        self.target_size = (128, 32)
        '''

        # Line Model input characteristics:
        self.source_size = (car_cfg['record']['width'], 
                            car_cfg['record']['height'])
        self.crop_size = (  lm_cfg['line']['cropped_width'],
                            lm_cfg['line']['cropped_height'] )
        self.target_size = (lm_cfg['line']['input_width'] , 
                            lm_cfg['line']['input_height'])

        self.crop_x = int( (self.source_size[0] - self.crop_size[0] ) /2 )
        self.crop_y = self.source_size[1] - self.crop_size[1] 

        if model_path is not None and weights_path is not None:
            with open(model_path) as f:
                yaml_contents = f.read()
            self.model = model_from_yaml(yaml_contents)
            self.model.load_weights(weights_path)

        #define model output characteristics:
        self.n_lines = lm_cfg['line']['n_lines']
        self.n_points = lm_cfg['line']['n_points']
        self.n_dimensions = lm_cfg['line']['n_dimensions']

        #define camera characteristics
        #linear measurements given in mm
        self.camera_height = 380
        self.camera_min_view = 500 #Fixme remeasure distance
        #arcs measured in radians
        self.camera_to_ground_arc = np.arctan(self.camera_min_view / self.camera_height)
        self.camera_offset_y = 0
        self.camera_arc_y = 80 * (np.pi / 180)
        self.camera_arc_x = 60 * (np.pi / 180)
        self.crop_ratio = [c / s for c, s in zip(self.crop_size, self.source_size)]
Project: deep-coref    Author: clarkkev    | Project source | File source
def test_sequential(self):
        print('Test sequential')
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict(X_test, verbose=0)
        classes = model.predict_classes(X_test, verbose=0)
        probas = model.predict_proba(X_test, verbose=0)
        print(model.get_config(verbose=1))

        print('test weight saving')
        model.save_weights('temp.h5', overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights('temp.h5')

        nloss = model.evaluate(X_train, y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)