Python keras.models module: example source code

The following code examples, extracted from open-source Python projects, illustrate how to use the keras.models module.
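
Before the project snippets, here is a minimal self-contained sketch (written for this listing, not taken from any project below, and assuming Keras 2.x) of the keras.models entry points most of the examples rely on: building a Sequential model, saving it, and restoring it with load_model.

# Minimal sketch (not from any project below); assumes Keras 2.x, the file name
# 'demo_model.h5' is arbitrary.
import numpy as np
import keras
from keras.layers import Dense

# build and compile a tiny Sequential model
model = keras.models.Sequential()
model.add(Dense(8, activation='relu', input_shape=(4,)))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')

# train briefly on random data, save the whole model, then load it back
X = np.random.rand(16, 4)
y = np.random.randint(0, 2, size=(16, 1))
model.fit(X, y, epochs=1, verbose=0)
model.save('demo_model.h5')                          # architecture + weights + optimizer state
restored = keras.models.load_model('demo_model.h5')
print(restored.predict(X[:2]))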

Project: MatchZoo    Author: faneshion
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--phase', default='train', help='Phase: Can be train or predict, the default value is train.')
    parser.add_argument('--model_file', default='./models/arci.config', help='Model_file: MatchZoo model file for the chosen model.')
    args = parser.parse_args()
    model_file = args.model_file
    with open(model_file, 'r') as f:
        config = json.load(f)
    phase = args.phase
    if phase == 'train':
        train(config)
    elif phase == 'predict':
        predict(config)
    else:
        print('Phase Error.')
    return
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab
def loadModel(self, modelPath):
        import h5py
        import json
        from neuralnets.keraslayers.ChainCRF import create_custom_objects

        model = keras.models.load_model(modelPath, custom_objects=create_custom_objects())

        with h5py.File(modelPath, 'r') as f:
            mappings = json.loads(f.attrs['mappings'])
            if 'additionalFeatures' in f.attrs:
                self.additionalFeatures = json.loads(f.attrs['additionalFeatures'])

            if 'maxCharLen' in f.attrs:
                self.maxCharLen = int(f.attrs['maxCharLen'])

        self.model = model        
        self.setMappings(None, mappings)
Project: Convolution-neural-networks-made-easy-with-keras    Author: mingruimingrui
def train(model, X_train, y_train, X_test, y_test):
    sys.stdout.write('Training model\n\n')
    sys.stdout.flush()

    # train each iteration individually to back up current state
    # safety measure against potential crashes
    epoch_count = 0
    while epoch_count < epoch:
        epoch_count += 1
        sys.stdout.write('Epoch count: ' + str(epoch_count) + '\n')
        sys.stdout.flush()
        model.fit(X_train, y_train, batch_size=batch_size,
                  nb_epoch=1, validation_data=(X_test, y_test))
        sys.stdout.write('Epoch {} done, saving model to file\n\n'.format(epoch_count))
        sys.stdout.flush()
        model.save_weights('./models/convnet_weights.h5')

    return model
Project: Convolution-neural-networks-made-easy-with-keras    Author: mingruimingrui
def train(model, X_train, y_train, X_test, y_test):
    sys.stdout.write('Training model with data augmentation\n\n')
    sys.stdout.flush()

    datagen = image_generator()
    datagen.fit(X_train)

    # train each iteration individually to back up current state
    # safety measure against potential crashes
    epoch_count = 0
    while epoch_count < epoch:
        epoch_count += 1
        sys.stdout.write('Epoch count: ' + str(epoch_count) + '\n')
        sys.stdout.flush()
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            steps_per_epoch=len(X_train) // batch_size,
                            epochs=1,
                            validation_data=(X_test, y_test))
        sys.stdout.write('Epoch {} done, saving model to file\n\n'.format(epoch_count))
        sys.stdout.flush()
        model.save_weights('./models/convnet_improved_weights.h5')

    return model
Project: NetworkCompress    Author: luzai
def train_process(self):
        client = GAClient.Client()
        for model in self.population.values():
            # if getattr(model, 'parent', None) is not None:
            # having a parent means mutation and weight changes, so the weights need to be saved
            keras.models.save_model(model.model, model.config.model_path)
            model.graph.save_params(model.config.output_path+'/graph.json')

            kwargs = dict(
                name=model.config.name,
                epochs=model.config.epochs,
                verbose=model.config.verbose,
                limit_data=model.config.limit_data,
                dataset_type=model.config.dataset_type
            )
            if parallel:
                client.run_self(kwargs)
            else:
                name, score = GAClient.run(**kwargs)
                setattr(self.population[name], 'score', score)

        if parallel:
            client.wait()
            for name, score in client.scores.items():
                setattr(self.population[name], 'score', score)
Project: deeplift    Author: kundajelab
def load_keras_model(weights, yaml=None, json=None,
                     normalise_conv_for_one_hot_encoded_input=False,
                     axis_of_normalisation=None,
                     name_of_conv_layer_to_normalise=None): 
    if (normalise_conv_for_one_hot_encoded_input):
        assert axis_of_normalisation is not None,\
         "specify axis of normalisation for normalising one-hot encoded input"
    assert yaml is not None or json is not None,\
     "either yaml or json must be specified"
    assert yaml is None or json is None,\
     "only one of yaml or json must be specified"
    if (yaml is not None):
        from keras.models import model_from_yaml 
        model = model_from_yaml(open(yaml).read()) 
    else:
        from keras.models import model_from_json 
        model = model_from_json(open(json).read()) 
    model.load_weights(weights) 
    if (normalise_conv_for_one_hot_encoded_input):
        mean_normalise_first_conv_layer_weights(
         model,
         axis_of_normalisation=axis_of_normalisation,
         name_of_conv_layer_to_normalise=name_of_conv_layer_to_normalise)
    return model
Project: coremltools    Author: apple
def test_initial_state_GRU(self):
        data = np.random.rand(1, 1, 2)

        model = keras.models.Sequential()
        model.add(keras.layers.GRU(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
        model.get_layer(index=1).reset_states()

        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        keras_output_1 = model.predict(data)
        coreml_full_output_1 = coreml_model.predict({'data': data})
        coreml_output_1 = coreml_full_output_1['output']
        coreml_output_1 = np.expand_dims(coreml_output_1, 1)

        np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

        hidden_state = (np.random.rand(1, 5))
        model.get_layer(index=1).reset_states(states=hidden_state)
        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        spec = coreml_model.get_spec()
        keras_output_2 = model.predict(data)
        coreml_full_output_2 = coreml_model.predict({'data': data, spec.description.input[1].name: hidden_state[0]})
        coreml_output_2 = coreml_full_output_2['output']
        coreml_output_2 = np.expand_dims(coreml_output_2, 1)
        np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
Project: coremltools    Author: apple
def test_initial_state_SimpleRNN(self):
        data = np.random.rand(1, 1, 2)
        model = keras.models.Sequential()
        model.add(keras.layers.SimpleRNN(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
        model.get_layer(index=1).reset_states()
        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        keras_output_1 = model.predict(data)
        coreml_full_output_1 = coreml_model.predict({'data': data})
        coreml_output_1 = coreml_full_output_1['output']
        coreml_output_1 = np.expand_dims(coreml_output_1, 1)
        np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

        hidden_state = np.random.rand(1, 5)
        model.get_layer(index=1).reset_states(states=hidden_state)
        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        spec = coreml_model.get_spec()
        keras_output_2 = model.predict(data)
        coreml_full_output_2 = coreml_model.predict({'data': data, spec.description.input[1].name: hidden_state[0]})
        coreml_output_2 = coreml_full_output_2['output']
        coreml_output_2 = np.expand_dims(coreml_output_2, 1)
        np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
Project: alt-i2v    Author: GINK03
def pred():
  tag_index = pickle.loads(open('tag_index.pkl', 'rb').read())
  index_tag = { index:tag for tag, index in tag_index.items() }
  name_img150 = []
  for name in filter(lambda x: '.jpg' in x, sys.argv):
    img = Image.open('{name}'.format(name=name))
    img = img.convert('RGB')
    img150 = np.array(img.resize((150, 150)))
    name_img150.append( (name, img150) )
  model = load_model(sorted(glob.glob('models/*.model'))[-1]) 
  for name, img150 in name_img150:
    result = model.predict(np.array([img150]) )
    result = result.tolist()[0]
    result = { i:w for i,w in enumerate(result)}
    for i,w in sorted(result.items(), key=lambda x:x[1]*-1)[:30]:
      print("{name} tag={tag} prob={prob}".format(name=name, tag=index_tag[i], prob=w) )
Project: alt-i2v    Author: GINK03
def train():
  for i in range(500):
    print('now iter {} load pickled dataset...'.format(i))
    Xs = []
    ys = []
    names = [name for idx, name in enumerate( glob.glob('../dataset/*.pkl') )]
    random.shuffle( names )
    for idx, name in enumerate(names):
      try:
        X,y = pickle.loads(open(name,'rb').read() ) 
      except EOFError as e:
        continue
      if idx%100 == 0:
        print('now scan iter', idx)
      if idx >= 15000:
        break
      Xs.append( X )
      ys.append( y )

    Xs = np.array( Xs )
    ys = np.array( ys )
    model.fit(Xs, ys, epochs=1 )
    print('now iter {} '.format(i))
    model.save_weights('models/{:09d}.h5'.format(i))
Project: alt-i2v    Author: GINK03
def pred():
  """
  tag_index = pickle.loads(open('tag_index.pkl', 'rb').read())
  index_tag = { index:tag for tag, index in tag_index.items() }
  name_img150 = []
  for name in filter(lambda x: '.jpg' in x, sys.argv):
    img = Image.open('{name}'.format(name=name))
    img = img.convert('RGB')
    img150 = np.array(img.resize((150, 150)))
    name_img150.append( (name, img150) )
  """
  model.load_weights(sorted(glob.glob('models/*.h5'))[-1]) 

  tag_index = pickle.loads( open('make_datapair/tag_index.pkl', 'rb').read() )
  index_tag = { index:tag for tag,index in tag_index.items() }


  for name in glob.glob('./make_datapair/dataset/*'):
    X, y = pickle.loads( open(name,'rb').read() )
    result = model.predict(np.array([X]) )
    result = result.tolist()[0]
    result = { i:w for i,w in enumerate(result)}
    for i,w in sorted(result.items(), key=lambda x:x[1]*-1)[:30]:
      print("{name} tag={tag} prob={prob}".format(name=name, tag=index_tag[i], prob=w) )
Project: Attention-DQN    Author: chasewind007
def compile(self, optimizer = None, loss_func = None):
        """Setup all of the TF graph variables/ops.

        This is inspired by the compile method on the
        keras.models.Model class.

        This is the place to create the target network, setup 
        loss function and any placeholders.
        """
        if loss_func is None:
            loss_func = mean_huber_loss
            # loss_func = 'mse'
        if optimizer is None:
            optimizer = Adam(lr = self.learning_rate)
            # optimizer = RMSprop(lr=0.00025)
        with tf.variable_scope("Loss"):
            state = Input(shape = (self.frame_height, self.frame_width, self.num_frames) , name = "states")
            action_mask = Input(shape = (self.num_actions,), name = "actions")
            qa_value = self.q_network(state)
            qa_value = merge([qa_value, action_mask], mode = 'mul', name = "multiply")
            qa_value = Lambda(lambda x: tf.reduce_sum(x, axis=1, keep_dims = True), name = "sum")(qa_value)

        self.final_model = Model(inputs = [state, action_mask], outputs = qa_value)
        self.final_model.compile(loss=loss_func, optimizer=optimizer)
Project: cervantes    Author: textclf
def load_model(model_spec_file, model_weights_file):
        from json import dumps, load

        params = load(open(model_spec_file, "r"))

        model = keras.models.model_from_json(dumps(params['model']))
        binary = params['binary']
        optimizer = params['optimizer']

        model.load_weights(model_weights_file)
        if binary:
            model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
        else:
            model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])

        lc = LanguageClassifier(model)
        lc.binary = binary
        return lc
Project: deepanalytics_compe26_benchmark    Author: takagiwa-ss
def resnet(repetition=2, k=1):
    '''Wide Residual Network (with a slight modification)
    depth == repetition*6 + 2
    '''
    from keras.models import Model
    from keras.layers import Input, Dense, Flatten, AveragePooling2D
    from keras.regularizers import l2

    input_shape = (1, _img_len, _img_len)
    output_dim = len(_columns)

    x = Input(shape=input_shape)

    z = conv2d(nb_filter=8, k_size=5, downsample=True)(x)        # out_shape ==    8, _img_len/ 2, _img_len/ 2
    z = bn_lrelu(0.01)(z)
    z = residual_block(nb_filter=k*16, repetition=repetition)(z) # out_shape == k*16, _img_len/ 4, _img_len/ 4
    z = residual_block(nb_filter=k*32, repetition=repetition)(z) # out_shape == k*32, _img_len/ 8, _img_len/ 8
    z = residual_block(nb_filter=k*64, repetition=repetition)(z) # out_shape == k*64, _img_len/16, _img_len/16
    z = AveragePooling2D((_img_len/16, _img_len/16))(z)
    z = Flatten()(z)
    z = Dense(output_dim=output_dim, activation='sigmoid', W_regularizer=l2(_Wreg_l2), init='zero')(z)

    return Model(input=x, output=z)
Project: DeepJet    Author: mstoye
def loadModel(self,filename):
        #import h5py
        #f = h5py.File(filename, 'r+')
        #del f['optimizer_weights']
        from keras.models import load_model
        self.keras_model=load_model(filename, custom_objects=global_loss_list)
        self.optimizer=self.keras_model.optimizer
        self.compiled=True
Project: Sacred_Deep_Learning    Author: AAbercrombie0492
def define_model(weights_path):
    '''
    Define model structure with weights.
    '''
    from resnet50 import ResNet50
    from keras.models import Model
    from keras.layers import Dense, GlobalAveragePooling2D


    resnet50_model = ResNet50()
    fc1000 = resnet50_model.get_layer('fc1000').output
    final_softmax = Dense(output_dim=2, activation='softmax')(fc1000)
    resnet50_finetune_1skip = Model(input=resnet50_model.input, output=final_softmax)
    resnet50_finetune_1skip.load_weights(weights_path)

    resnet50_finetune_1skip.compile(loss="categorical_crossentropy",
                                optimizer='nadam',
                                metrics=['accuracy'])

    return resnet50_finetune_1skip
Project: residual_block_keras    Author: keunwoochoi
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist: # size to be changed to 32,32
        model.add(ZeroPadding2D((2,2), input_shape=(img_channels, img_rows, img_cols))) # resize (28,28)-->(32,32)
        # the first conv 
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same', input_shape=(img_channels, img_rows, img_cols)))

    model.add(Activation('relu'))
    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    # [Classifier]    
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # [END]
    return model
Project: cleverhans    Author: tensorflow
def fprop(self, x):
        """
        Exposes all the layers of the model returned by get_layer_names.
        :param x: A symbolic representation of the network input
        :return: A dictionary mapping layer names to the symbolic
                 representation of their output.
        """
        from keras.models import Model as KerasModel

        if self.keras_model is None:
            # Get the input layer
            new_input = self.model.get_input_at(0)

            # Make a new model that returns each of the layers as output
            out_layers = [x_layer.output for x_layer in self.model.layers]
            self.keras_model = KerasModel(new_input, out_layers)

        # and get the outputs for that model on the input x
        outputs = self.keras_model(x)

        # Keras only returns a list for outputs of length >= 1, if the model
        # is only one layer, wrap a list
        if len(self.model.layers) == 1:
            outputs = [outputs]

        # compute the dict to return
        fprop_dict = dict(zip(self.get_layer_names(), outputs))

        return fprop_dict
Project: vinci    Author: Phylliade
def Model(input, output, **kwargs):
    if int(keras.__version__.split('.')[0]) >= 2:
        return keras.models.Model(inputs=input, outputs=output, **kwargs)
    else:
        return keras.models.Model(input=input, output=output, **kwargs)
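
A hypothetical usage sketch for the version-agnostic wrapper above (the tensors inp and out are placeholders introduced here, not part of the vinci project): the same positional call works under both Keras 1.x and 2.x because the wrapper picks the matching keyword names.

# Hypothetical usage of the Model() wrapper above; 'inp' and 'out' are placeholders.
from keras.layers import Input, Dense

inp = Input(shape=(10,))
out = Dense(1, activation='sigmoid')(inp)
m = Model(inp, out)   # dispatches to inputs=/outputs= on Keras >= 2, input=/output= otherwise
m.compile(optimizer='adam', loss='binary_crossentropy')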
Project: pyannote-audio    Author: pyannote
def __init__(self, experiment_dir, db_yml=None):

        super(SpeakerEmbedding, self).__init__(
            experiment_dir, db_yml=db_yml)

        # architecture
        if 'architecture' in self.config_:
            architecture_name = self.config_['architecture']['name']
            models = __import__('pyannote.audio.embedding.models',
                                fromlist=[architecture_name])
            Architecture = getattr(models, architecture_name)
            self.architecture_ = Architecture(
                **self.config_['architecture'].get('params', {}))

        # approach
        if 'approach' in self.config_:
            approach_name = self.config_['approach']['name']
            approaches = __import__('pyannote.audio.embedding.approaches',
                                    fromlist=[approach_name])
            Approach = getattr(approaches, approach_name)
            self.approach_ = Approach(
                **self.config_['approach'].get('params', {}))

    # (5, None, None, False) ==> '5'
    # (5, 1, None, False) ==> '1-5'
    # (5, None, 2, False) ==> '5+2'
    # (5, 1, 2, False) ==> '1-5+2'
    # (5, None, None, True) ==> '5x'
Project: MovieTaster-Open    Author: lujiaying
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
    # Sequential paradigm
    if paradigm == 'Sequential':
        target = Sequential()
        target.add(Embedding(vocab_size, embedding_dim, input_length=1))
        context = Sequential()
        context.add(Embedding(vocab_size, embedding_dim, input_length=1))

        # merge the pivot and context models
        model = Sequential()
        model.add(Merge([target, context], mode='dot'))
        model.add(Reshape((1,), input_shape=(1,1)))
        model.add(Activation('sigmoid'))
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    # Functional paradigm
    elif paradigm == 'Functional':
        target = Input(shape=(1,), name='target')
        context = Input(shape=(1,), name='context')
        #print target.shape, context.shape
        shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')
        embedding_target = shared_embedding(target)
        embedding_context = shared_embedding(context)
        #print embedding_target.shape, embedding_context.shape

        merged_vector = dot([embedding_target, embedding_context], axes=-1)
        reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)
        #print merged_vector.shape
        prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)
        #print prediction.shape

        model = Model(inputs=[target, context], outputs=prediction)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    else:
        print('paradigm error')
        return None
Project: MovieTaster-Open    Author: lujiaying
def cbow_base_model(dict_size, emb_size=100, context_window_size=4):
    model = keras.models.Sequential()
    model.add(Embedding(dict_size, emb_size, 
        input_length=context_window_size,
        embeddings_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=0.2),
        ))
    model.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(emb_size,)))
    model.add(Dense(dict_size))
    model.add(Activation('softmax')) # TODO: use nce

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
            loss='categorical_crossentropy',)
    return model
Project: MovieTaster-Open    Author: lujiaying
def train_cbow_base_model():
    min_word_freq = 5
    word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
    dict_size = len(word_dict)
    emb_size = 100
    context_window_size = 4
    epochs = 20
    batch_size = 128

    model = cbow_base_model(dict_size, emb_size, context_window_size)
    for epoch_id in range(epochs):
        # train by batch
        batch_id = 0
        x_batch = []
        y_batch = []
        for movie_ids in process.shuffle(process.reader_creator(word_dict, ngram=context_window_size+1), 10000)():
            batch_id += 1
            if batch_id % (batch_size*50) == 0:
                # Print evaluate log
                score = model.evaluate(np.array(x_batch),
                    keras.utils.to_categorical(y_batch, num_classes=dict_size))
                logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, score))
            if batch_id % batch_size == 0:
                # Convert labels to categorical one-hot encoding
                model.train_on_batch(np.array(x_batch),
                        keras.utils.to_categorical(y_batch, num_classes=dict_size))
                x_batch = []
                y_batch = []
            x = np.array(movie_ids[:context_window_size])
            y = movie_ids[-1]
            x_batch.append(x)
            y_batch.append(y)
    logger.info('model train done')
    # store word embedding
    with open('./models/keras_0804_09_cbow', 'w') as fwrite:
        for idx, vec in enumerate(model.layers[0].get_weights()[0].tolist()):
            fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec])))
Project: NetworkCompress    Author: luzai
def make_init_model(self):
        models = []

        input_data = Input(shape=self.gl_config.input_shape)
        import random
        init_model_index = random.randint(1, 4)
        init_model_index = 1
        if init_model_index == 1:  # one conv layer with kernel num = 64
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d1' )(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 2:  # two conv layers with kernel num = 64
            stem_conv_0 = Conv2D(128, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 3:  # one conv layer with a wider kernel num = 128
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 4:  # two conv layers with a wider kernel_num = 128
            stem_conv_0 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)
        import keras
        stem_conv_1 = keras.layers.MaxPooling2D(name='maxpooling2d1')(stem_conv_1)
        stem_conv_1 = Conv2D(self.gl_config.nb_class, 3, padding='same', name='conv2d3')(stem_conv_1)
        stem_global_pooling_1 = GlobalMaxPooling2D(name='globalmaxpooling2d1')(stem_conv_1)
        stem_softmax_1 = Activation('softmax', name='activation1')(stem_global_pooling_1)

        model = Model(inputs=input_data, outputs=stem_softmax_1)

        return model
Project: NetworkCompress    Author: luzai
def copy_model(self, model, config):
        from keras.utils.generic_utils import get_custom_objects
        from Model import IdentityConv, GroupIdentityConv

        get_custom_objects()['IdentityConv'] = IdentityConv
        get_custom_objects()['GroupIdentityConv'] = GroupIdentityConv

        new_model = MyModel(config, model.graph.copy(), keras.models.load_model(model.config.model_path))
        keras.models.save_model(new_model.model, new_model.config.model_path)
        return new_model
Project: donkey    Author: wroscoe
def load(self, model_path):
        self.model = keras.models.load_model(model_path)
Project: donkey    Author: wroscoe
def default_categorical():
    from keras.layers import Input, Dense, merge
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Dense

    img_in = Input(shape=(120, 160, 3), name='img_in')                      # First layer, input layer, Shape comes from camera.py resolution, RGB
    x = img_in
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)       # 24 features, 5 pixel x 5 pixel kernel (convolution, feature) window, 2wx2h stride, relu activation
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)       # 32 features, 5px5p kernel window, 2wx2h stride, relu activation
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)       # 64 features, 5px5p kernel window, 2wx2h stride, relu
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)       # 64 features, 3px3p kernel window, 2wx2h stride, relu
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)       # 64 features, 3px3p kernel window, 1wx1h stride, relu

    # Possibly add MaxPooling (will make it less sensitive to position in image). Camera angle is fixed, so it may not be needed

    x = Flatten(name='flattened')(x)                                        # Flatten to 1D (Fully connected)
    x = Dense(100, activation='relu')(x)                                    # Classify the data into 100 features, make all negatives 0
    x = Dropout(.1)(x)                                                      # Randomly drop out (turn off) 10% of the neurons (Prevent overfitting)
    x = Dense(50, activation='relu')(x)                                     # Classify the data into 50 features, make all negatives 0
    x = Dropout(.1)(x)                                                      # Randomly drop out 10% of the neurons (Prevent overfitting)
    #categorical output of the angle
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)        # Connect every input with every output and output 15 hidden units. Use Softmax to give percentage. 15 categories and find best one based off percentage 0.0-1.0

    # continuous output of throttle
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)      # Reduce to 1 number, Positive number only

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy', 
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': .001})

    return model
Project: donkey    Author: wroscoe
def default_linear():
    from keras.layers import Input, Dense, merge
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Dense

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)

    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='linear')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='linear')(x)
    x = Dropout(.1)(x)
    # continuous output of the angle
    angle_out = Dense(1, activation='linear', name='angle_out')(x)

    # continuous output of throttle
    throttle_out = Dense(1, activation='linear', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])


    model.compile(optimizer='adam',
                  loss={'angle_out': 'mean_squared_error', 
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': .5})

    return model
Project: donkey    Author: wroscoe
def default_n_linear(num_outputs):
    from keras.layers import Input, Dense, merge
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
    x = Lambda(lambda x: x/127.5 - 1.)(x) # normalize and re-center
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)

    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)

    outputs = [] 

    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='n_outputs' + str(i))(x))

    model = Model(inputs=[img_in], outputs=outputs)


    model.compile(optimizer='adam',
                  loss='mse')

    return model
Project: donkey    Author: wroscoe
def default_imu(num_outputs, num_imu_inputs):
    '''
    Notes: this model depends on concatenate which failed on keras < 2.0.8
    '''

    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
    from keras.layers.merge import concatenate

    img_in = Input(shape=(120,160,3), name='img_in')
    imu_in = Input(shape=(num_imu_inputs,), name="imu_in")

    x = img_in
    x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
    #x = Lambda(lambda x: x/127.5 - 1.)(x) # normalize and re-center
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)

    y = imu_in
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)

    z = concatenate([x, y])
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)

    outputs = [] 

    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='out_' + str(i))(z))

    model = Model(inputs=[img_in, imu_in], outputs=outputs)

    model.compile(optimizer='adam',
                  loss='mse')

    return model
Project: mpi_learn    Author: duanders
def load_model(filename=None, json_str=None, weights_file=None, custom_objects={}):
    """Loads model architecture from JSON and instantiates the model.
        filename: path to JSON file specifying model architecture
        json_str: (or) a json string specifying the model architecture
        weights_file: path to HDF5 file containing model weights
    custom_objects: A Dictionary of custom classes used in the model keyed by name"""
    import_keras()
    from keras.models import model_from_json
    if filename != None:
        with open( filename ) as arch_f:
            json_str = arch_f.readline()
    model = model_from_json( json_str, custom_objects=custom_objects) 
    if weights_file is not None:
        model.load_weights( weights_file )
    return model
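
A hypothetical call to the loader above, assuming the architecture was exported with model.to_json() and the weights with model.save_weights(); the file names are placeholders, not from the project:

# Hypothetical usage of mpi_learn's load_model helper above; file names are placeholders.
restored = load_model(filename='arch.json', weights_file='weights.h5')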
Project: deeplift    Author: kundajelab
def convert_sequential_model(model,
                        num_dims=None,
                        nonlinear_mxts_mode=\
                         NonlinearMxtsMode.DeepLIFT_GenomicsDefault,
                        verbose=True,
                        dense_mxts_mode=DenseMxtsMode.Linear,
                        conv_mxts_mode=ConvMxtsMode.Linear,
                        maxpool_deeplift_mode=default_maxpool_deeplift_mode,
                        layer_overrides={}):
    if (verbose):
        print("nonlinear_mxts_mode is set to: "+str(nonlinear_mxts_mode))
    converted_layers = []
    if (model.layers[0].input_shape is not None):
        input_shape = model.layers[0].input_shape
        assert input_shape[0] is None #batch axis
        num_dims_input = len(input_shape)
        assert num_dims is None or num_dims_input==num_dims,\
        "num_dims argument of "+str(num_dims)+" is incompatible with"\
        +" the number of dims in layers[0].input_shape which is: "\
        +str(model.layers[0].input_shape)
        num_dims = num_dims_input
    else:
        input_shape = None
    converted_layers.append(
        blobs.Input(num_dims=num_dims, shape=input_shape, name="input"))
    #converted_layers is actually mutated to be extended with the
    #additional layers so the assignment is not strictly necessary,
    #but whatever
    converted_layers = sequential_container_conversion(
                layer=model, name="", verbose=verbose,
                nonlinear_mxts_mode=nonlinear_mxts_mode,
                dense_mxts_mode=dense_mxts_mode,
                conv_mxts_mode=conv_mxts_mode,
                maxpool_deeplift_mode=maxpool_deeplift_mode,
                converted_layers=converted_layers,
                layer_overrides=layer_overrides)
    deeplift.util.connect_list_of_layers(converted_layers)
    converted_layers[-1].build_fwd_pass_vars()
    return models.SequentialModel(converted_layers)
Project: coremltools    Author: apple
def test_initial_state_LSTM(self):
        data = np.random.rand(1, 1, 2)

        model = keras.models.Sequential()
        model.add(keras.layers.LSTM(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
        model.get_layer(index=1).reset_states()

        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')

        keras_output_1 = model.predict(data)
        coreml_full_output_1 = coreml_model.predict({'data': data})
        coreml_output_1 = coreml_full_output_1['output']
        coreml_output_1 = np.expand_dims(coreml_output_1, 1)

        np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

        hidden_state = (np.random.rand(1, 5), np.random.rand(1, 5))
        model.get_layer(index=1).reset_states(states=hidden_state)

        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        spec = coreml_model.get_spec()

        keras_output_2 = model.predict(data)
        coreml_full_output_2 = coreml_model.predict(
            {'data': data, spec.description.input[1].name: hidden_state[0][0],
             spec.description.input[2].name: hidden_state[1][0]})
        coreml_output_2 = coreml_full_output_2['output']
        coreml_output_2 = np.expand_dims(coreml_output_2, 1)

        np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
Project: AI-Chatbot    Author: anujdutt9
def load(self):
        self.model = keras.models.load_model(memoryNetwork.FILE_NAME)
        with open(memoryNetwork.VOCAB_FILE_NAME, 'rb') as file:
            self.word_id = pickle.load(file)
Project: alt-i2v    Author: GINK03
def train():
  print('load lexical dataset...')
  Xs, Ys = loader(db='lexical150.ldb')
  print('build model...')
  model = build_model()
  for i in range(100):
    model.fit(np.array(Xs), np.array(Ys), batch_size=16, nb_epoch=1 )
    if i%1 == 0:
      model.save('models/model%05d.model'%i)
Project: alt-i2v    Author: GINK03
def eval():
  tag_index = pickle.loads(open('tag_index.pkl', 'rb').read())
  index_tag = { index:tag for tag, index in tag_index.items() }
  model = build_model()
  model = load_model(sorted(glob.glob('models/*.model'))[-1]) 
  Xs, Ys = loader(db='lexical_eval.ldb', th=100)
  for i in range(30):
    result = model.predict(np.array([Xs[i]]))
    result = {idx: w for idx, w in enumerate(result.tolist()[0])}

    for idx, w in sorted(result.items(), key=lambda x: x[1] * -1)[:30]:
      print(index_tag[idx], idx, w)
Project: devise-keras    Author: priyamtejaswin
def __init__(self, folder):
        super(EpochCheckpoint, self).__init__()
        assert folder is not None, "Err. Please specify a folder where models will be saved"
        self.folder = folder
        print "[LOG] EpochCheckpoint: folder to save models: "+self.folder
Project: devise-keras    Author: priyamtejaswin
def main():
    RUN_TIME = sys.argv[1]


    if RUN_TIME == "TRAIN":
        image_features = Input(shape=(4096,))
        model = build_model(image_features)
        print model.summary()

        # number of training images 
        _num_train = get_num_train_images()

        # Callbacks 
        # remote_cb = RemoteMonitor(root='http://localhost:9000')
        tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
        epoch_cb    = EpochCheckpoint(folder="./snapshots/")
        valid_cb    = ValidCallBack()

        # fit generator
        steps_per_epoch = math.ceil(_num_train/float(BATCH))
        print "Steps per epoch i.e number of iterations: ",steps_per_epoch

        train_datagen = data_generator(batch_size=INCORRECT_BATCH, image_class_ranges=TRAINING_CLASS_RANGES)
        history = model.fit_generator(
                train_datagen,
                steps_per_epoch=steps_per_epoch,
                epochs=250,
                callbacks=[tensorboard, valid_cb]
            )
        print history.history.keys()


    elif RUN_TIME == "TEST":
        from keras.models import load_model 
        model = load_model("snapshots/epoch_49.hdf5", custom_objects={"hinge_rank_loss":hinge_rank_loss})

    K.clear_session()
Project: FCN_MSCOCO_Food_Segmentation    Author: gakarak
def loadModelFromJson(pathModelJson):
        if not os.path.isfile(pathModelJson):
            raise Exception("Can't find JSON file [%s]" % pathModelJson)
        tpathBase = os.path.splitext(pathModelJson)[0]
        tpathModelWeights = '%s.h5' % tpathBase
        if not os.path.isfile(tpathModelWeights):
            raise Exception("Can't find h5 weights file [%s]" % tpathModelWeights)
        with open(pathModelJson, 'r') as f:
            tmpStr = f.read()
            model = keras.models.model_from_json(tmpStr)
            model.load_weights(tpathModelWeights)
        return model
Project: baseline    Author: dpressel
def load(basename, **kwargs):
        model = ConvModel()

        model.impl = keras.models.load_model(basename, **kwargs)
        with open(basename + '.labels', 'r') as f:
            model.labels = json.load(f)

        with open(basename + '.vocab', 'r') as f:
            model.vocab = json.load(f)
        return model
Project: baseline    Author: dpressel
def create(w2v, labels, **kwargs):
        model = ConvModel()
        model.labels = labels
        model.vocab = w2v.vocab
        filtsz = kwargs['filtsz']
        pdrop = kwargs.get('dropout', 0.5)
        mxlen = int(kwargs.get('mxlen', 100))
        cmotsz = kwargs['cmotsz']
        finetune = bool(kwargs.get('finetune', True))
        nc = len(labels)
        x = Input(shape=(mxlen,), dtype='int32', name='input')

        vocab_size = w2v.weights.shape[0]
        embedding_dim = w2v.dsz

        lut = Embedding(input_dim=vocab_size, output_dim=embedding_dim, weights=[w2v.weights], input_length=mxlen, trainable=finetune)

        embed = lut(x)

        mots = []
        for i, fsz in enumerate(filtsz):
            conv = Conv1D(cmotsz, fsz, activation='relu')(embed)
            gmp = GlobalMaxPooling1D()(conv)
            mots.append(gmp)

        joined = merge(mots, mode='concat')
        cmotsz_all = cmotsz * len(filtsz)
        drop1 = Dropout(pdrop)(joined)

        input_dim = cmotsz_all
        last_layer = drop1
        dense = Dense(output_dim=nc, input_dim=input_dim, activation='softmax')(last_layer)
        model.impl = keras.models.Model(input=[x], output=[dense])
        return model
Project: baseline    Author: dpressel
def get_vocab(self):
        return self.vocab

# TODO: Add the other models!
Project: cervantes    Author: textclf
def predict(self, X):
        if type(self.model.input_shape) is tuple:
            X = np.array(X)
            if len(self.model.input_shape) == 2:
                X = X.reshape((X.shape[0], -1))
        else:
            raise LanguageClassifierException('Multi-input models are not supported yet')

        predictions = self.model.predict(X, verbose=True, batch_size=32)
        if (len(predictions.shape) > 1) and (1 not in predictions.shape):
            predictions = predictions.argmax(axis=-1)
        else:
            predictions = 1 * (predictions > 0.5).ravel()
        return predictions
Project: cervantes    Author: textclf
def predict_proba(self, X):
        if type(self.model.input_shape) is tuple:
            X = np.array(X)
            if len(self.model.input_shape) == 2:
                X = X.reshape((X.shape[0], -1))
        else:
            raise LanguageClassifierException('Multi-input models are not supported yet')
        return self.model.predict(X, verbose=True, batch_size=32)
Project: cervantes    Author: textclf
def test(self, X, y, verbose=True):
        # if we don't need 3d inputs...
        if type(self.model.input_shape) is tuple:
            X = np.array(X)
            if len(self.model.input_shape) == 2:
                X = X.reshape((X.shape[0], -1))
        else:
            raise LanguageClassifierException('Multi-input models are not supported yet')

        if verbose:
            print("Getting predictions on the test set")
        predictions = self.predict(X)

        if len(predictions) != len(y):
            raise LanguageClassifierException("Non comparable arrays")

        if self.binary:
            acc = (predictions == y).mean()
            prec = np.sum(np.bitwise_and(predictions, y)) * 1.0 / np.sum(predictions)
            recall = np.sum(np.bitwise_and(predictions, y)) * 1.0 / np.sum(y)
            if verbose:
                print("Test set accuracy of {0:.3f}%".format(acc * 100.0))
                print("Test set error of {0:.3f}%".format((1 - acc) * 100.0))
                print("Precision for class=1: {0:.3f}".format(prec))
                print("Recall for class=1: {0:.3f}".format(recall))

            return (acc, prec, recall)
        else:
            # TODO: Obtain more metrics for the multiclass problem
            acc = (predictions == y).mean()
            if verbose:
                print("Test set accuracy of {0:.3f}%".format(acc * 100.0))
                print("Test set error of {0:.3f}%".format((1 - acc) * 100.0))
            return acc
Project: deepanalytics_compe26_benchmark    Author: takagiwa-ss
def _load_model(fn):
    from keras.models import model_from_json
    with open(fn + '.json') as f:
        model = model_from_json(f.read())
    model.load_weights(fn + '.h5')
    return model
Project: keras-resnet-food-reverse-engineering    Author: GINK03
def train():
  print('load lexical dataset...')
  Ys, Xs, Rs = loader()
  print('build model...')
  model = build_model()
  for i in range(100):
    model.fit(np.array(Xs), np.array(Ys), batch_size=16, nb_epoch=1 )
    if i%1 == 0:
      model.save('models/model%05d.model'%i)
Project: keras-resnet-food-reverse-engineering    Author: GINK03
def eval():
  item_index = pickle.loads(open("cookpad/item_index.pkl", "rb").read())
  index_items = { index:item for item, index in item_index.items()}
  model = build_model()
  model = load_model(sorted(glob.glob('models/*.model'))[-1]) 
  Ys, Xs, Rs = loader(th=10)
  for i in range(len(Xs)):
    result = model.predict(np.array([Xs[i]]) )
    ares   = [(index_items[index], w) for index, w in enumerate(result.tolist()[0]) ]
    print(Rs[i])
    for en, (item, w) in enumerate(sorted(ares, key=lambda x:x[1]*-1)[:10]):
      print(en, item, w)
Project: keras-resnet-food-reverse-engineering    Author: GINK03
def pred():
  item_index = pickle.loads(open("cookpad/item_index.pkl", "rb").read())
  index_items = { index:item for item, index in item_index.items()}
  model = build_model()
  model = load_model(sorted(glob.glob('models/model00060.model'))[-1]) 
  target_size = (224,224)
  dir_path = "to_pred/*"
  max_size = len(glob.glob(dir_path))
  for i, name in enumerate(glob.glob(dir_path)):
    try:
      img = Image.open(name)
    except OSError as e:
      continue
    print(i, max_size, name.split('/')[-1])
    w, h = img.size
    if w > h :
      blank = Image.new('RGB', (w, w))
    if w <= h :
      blank = Image.new('RGB', (h, h))
    blank.paste(img, (0, 0) )
    blank = blank.resize( target_size )
    Xs = np.array([np.asanyarray(blank)])
    result = model.predict(Xs)
    ares   = [(index_items[index], w) for index, w in enumerate(result.tolist()[0]) ]
    for en, (item, w) in enumerate(sorted(ares, key=lambda x:x[1]*-1)[:10]):
      print(en, item, w)