Python model module: create_model() example source code

We extracted the following 17 code examples from open-source Python projects to illustrate how model.create_model() is used.
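Across these projects, create_model() appears in two broad shapes: a TensorFlow-style helper that parses command-line flags and returns a (model, remaining argv) pair (the main() snippets below), and a Keras-style factory that builds, and optionally compiles, a model from a configuration dict. The following is a minimal, hypothetical sketch of the second pattern; the layer sizes and the config keys input_dim and num_classes are illustrative and not taken from any of the projects listed here.

def create_model(config_dict, compile_model=True):
    # Hypothetical sketch only: build a small Keras model from a config dict,
    # mirroring the create_model(config_dict=..., compile_model=...) call sites below.
    from keras.models import Sequential
    from keras.layers import Dense

    model = Sequential([
        Dense(128, activation='relu', input_dim=config_dict['input_dim']),
        Dense(config_dict['num_classes'], activation='softmax'),
    ])
    if compile_model:
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
    return model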

Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
Project: cloudml-samples    Author: GoogleCloudPlatform    | Project source | File source
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
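This two-line main() pattern recurs throughout the listing (the tensorflow_mnist_cloudml and kaggle-youtube-8m entries below are near-identical). A minimal, hedged sketch of how such an entry point is typically wired up follows; run() and the model_lib import are placeholders, not code from these projects.

import tensorflow as tf  # assumes TensorFlow 1.x for tf.app.run
import model as model_lib  # project-specific model module (placeholder import)


def run(model, argv):
    # Placeholder for the project-specific training/evaluation loop.
    pass


def main(_):
    # create_model() is assumed to return the model object together with any
    # leftover command-line arguments, as in the call sites above.
    model, argv = model_lib.create_model()
    run(model, argv)


if __name__ == '__main__':
    tf.app.run(main=main)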
Project: satellite-image-object-detection    Author: marcbelmont    | Project source | File source
def load_inference(sess, ckptdir, threshold):
    images = tf.placeholder(tf.float32, shape=[None, IMG_SIZE, IMG_SIZE, 3])
    net = create_model(images, .1)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=10)
    if ckptdir and os.path.exists(ckptdir) and not FLAGS.debug:
        checkpoint = tf.train.latest_checkpoint(ckptdir)
        if checkpoint:
            print('Restoring', checkpoint)
            saver.restore(sess, checkpoint)
    return inference(net, threshold), images

Project: tensorflow_mnist_cloudml    Author: mainyaa    | Project source | File source
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
Project: tensorflow_mnist_cloudml    Author: mainyaa    | Project source | File source
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
Project: Image-Caption-Generator    Author: shagunsodhani    | Project source | File source
def predict(image_name,
            data_dir="/home/shagun/projects/Image-Caption-Generator/data/",
            weights_path=None,
            mode="test"):
    '''Method to predict the caption for a given image.
    weights_path is the path to the .h5 file (model)'''

    image_path = data_dir + "images/" + image_name
    vgg_model = load_vgg16()
    vgg_embedding = vgg_model.predict(
        load_image(image_path)
    )
    image_embeddings = [vgg_embedding]

    config_dict = generate_config(data_dir=data_dir,
                                  mode=mode)
    print(config_dict)

    model = create_model(config_dict=config_dict,
                         compile_model=False)

    model.load_weights(data_dir + "model/" + weights_path)

    tokenizer = get_tokenizer(config_dict=config_dict,
                              data_dir=data_dir)

    index_to_word = {v: k for k, v in tokenizer.word_index.items()}

    for image_embedding in image_embeddings:
        gen_captions(config=config_dict,
                     model=model,
                     image_embedding=image_embedding,
                     tokenizer=tokenizer,
                     num_captions=2,
                     index_to_word=index_to_word
                     )
Project: kaggle-youtube-8m    Author: liufuyang    | Project source | File source
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
Project: kaggle-youtube-8m    Author: liufuyang    | Project source | File source
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
Project: kaggle-youtube-8m    Author: liufuyang    | Project source | File source
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
Project: cs224n_prj    Author: lps-stanf    | Project source | File source
def perform_testing(settings, id_to_word_dict, captions_data=None, coco_images_dir=None, coco_num_images=None,
                    coco_out_path=None):
    from preprocess import TokenBegin, TokenEnd

    with h5py.File(settings.preprocessed_images_file, 'r') as h5_images_file:
        image_shape = h5_images_file['images'].shape[1:]

    with h5py.File(settings.preprocessed_text_file, 'r') as h5_text_file:
        sentence_max_len = len(h5_text_file['sentences'][0])

    dict_size = len(id_to_word_dict)

    TokenBeginIndex = find_token_index(id_to_word_dict, TokenBegin)
    TokenEndIndex = find_token_index(id_to_word_dict, TokenEnd)

    model = create_model(image_shape, dict_size, sentence_max_len, settings)
    model.load_weights(settings.weights)

    if captions_data is not None and coco_images_dir is not None and coco_num_images is not None \
            and coco_out_path is not None:
        calculate_metrics(settings, model, id_to_word_dict, captions_data, coco_images_dir, coco_num_images,
                          coco_out_path, image_shape[:2], sentence_max_len, TokenBeginIndex, TokenEndIndex)
    else:
        create_caption_for_path(settings.test_source, model, image_shape[:2], sentence_max_len, TokenBeginIndex,
                                TokenEndIndex, id_to_word_dict, settings.output_dir,
                                (settings.out_max_width, settings.out_max_height))
Project: Image-Caption-Generator    Author: abi-aryan    | Project source | File source
def train(batch_size=128,
          epochs=100,
          data_dir="/home/shagun/projects/Image-Caption-Generator/data/",
          weights_path=None,
          mode="train"):
    '''Method to train the image caption generator
    weights_path is the path to the .h5 file where weights from the previous
    run are saved (if available)'''

    config_dict = generate_config(data_dir=data_dir,
                                  mode=mode)
    config_dict['batch_size'] = batch_size
    steps_per_epoch = config_dict["total_number_of_examples"] // batch_size

    print("steps_per_epoch = ", steps_per_epoch)
    train_data_generator = debug_generator(config_dict=config_dict,
                                           data_dir=data_dir)

    model = create_model(config_dict=config_dict)

    if weights_path:
        model.load_weights(weights_path)

    file_name = data_dir + "model/weights-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath=file_name,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    tensorboard = TensorBoard(log_dir='../logs',
                              histogram_freq=0,
                              batch_size=batch_size,
                              write_graph=True,
                              write_grads=True,
                              write_images=False,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)

    callbacks_list = [checkpoint, tensorboard]
    model.fit_generator(
        generator=train_data_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=2,
        callbacks=callbacks_list)
Project: sdp    Author: tansey    | Project source | File source
def main():
    parser = argparse.ArgumentParser(description='Predicts pixel intensities given a random subset of an image.')

    # Experiment settings
    parser.add_argument('--inputdir', default='experiments/pixels/data', help='The directory where the input data files will be stored.')
    parser.add_argument('--outputdir', default='experiments/pixels/results', help='The directory where the result files will be stored.')
    parser.add_argument('--variable_scope', default='pixels-', help='The variable scope that the model will be created with.')
    parser.add_argument('--train_id', type=int, default=0, help='A trial ID. All models trained with the same trial ID will use the same train/validation datasets.')
    parser.add_argument('--train_samples', type=int, default=50000, help='The number of training examples to use.')
    parser.add_argument('--test_samples', type=int, default=10000, help='The number of test examples to use.')
    parser.add_argument('--validation_pct', type=float, default=0.2,
                                        help='The number of samples to hold out for a validation set. This is a percentage of the training samples.')
    parser.add_argument('--dimsize', type=int, default=256, help='The number of bins for each subpixel intensity (max 256, must be a power of 2).')
    parser.add_argument('--batchsize', type=int, default=50, help='The number of training samples per mini-batch.')

    # GMM/LMM settings
    parser.add_argument('--num_components', type=int, default=5, help='The number of mixture components for gmm or lmm models.')

    # Get the arguments from the command line
    args = parser.parse_args()
    dargs = vars(args)
    dargs['model'] = 'gmm'
    dargs['dataset'] = 'cifar'
    dargs['outfile'] = os.path.join(dargs['outputdir'], '{model}_{dataset}_{train_samples}_{num_components}_{train_id}'.format(**dargs))
    dargs['variable_scope'] = '{model}-{dataset}-{train_samples}-{num_components}-{train_id}'.format(**dargs)


    # Get the data
    from cifar_utils import DataLoader
    train_data = DataLoader(args.inputdir, 'train', args.train_samples, args.batchsize, seed=args.train_id, dimsize=args.dimsize)
    validate_data = DataLoader(args.inputdir, 'validate', args.train_samples, args.batchsize, seed=args.train_id, dimsize=args.dimsize)
    test_data = DataLoader(args.inputdir, 'test', args.test_samples, args.batchsize, seed=args.train_id, dimsize=args.dimsize)

    dargs['x_shape'] = train_data.x_shape()
    dargs['y_shape'] = train_data.y_shape()
    dargs['lazy_density'] = True # density is too big to enumerate for cifar
    dargs['one_hot'] = False # We use just the intensities not a one-hot

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))

    # Get the X placeholder and the output distribution model
    tf_X, dist = create_model(**dargs)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    # Reset the model back to the best version
    saver.restore(sess, dargs['outfile'])

    logprobs, rmse = explicit_score(sess, args.model, dist, test_data, tf_X)
    print(logprobs, rmse)
    # Note: best_loss, args.k and args.lam are not defined in this excerpt.
    np.savetxt(dargs['outfile'] + '_score.csv', [best_loss, logprobs, rmse, args.k, args.lam, args.num_components])
Project: Image-Caption-Generator    Author: shagunsodhani    | Project source | File source
def train(batch_size=128,
          epochs=100,
          data_dir="/home/shagun/projects/Image-Caption-Generator/data/",
          weights_path=None,
          mode="train"):
    '''Method to train the image caption generator
    weights_path is the path to the .h5 file where weights from the previous
    run are saved (if available)'''

    config_dict = generate_config(data_dir=data_dir,
                                  mode=mode)
    config_dict['batch_size'] = batch_size
    steps_per_epoch = config_dict["total_number_of_examples"] // batch_size

    print("steps_per_epoch = ", steps_per_epoch)
    train_data_generator = debug_generator(config_dict=config_dict,
                                           data_dir=data_dir)

    model = create_model(config_dict=config_dict)

    if weights_path:
        model.load_weights(weights_path)

    file_name = data_dir + "model/weights-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath=file_name,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    tensorboard = TensorBoard(log_dir='../logs',
                              histogram_freq=0,
                              batch_size=batch_size,
                              write_graph=True,
                              write_grads=True,
                              write_images=False,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)

    callbacks_list = [checkpoint, tensorboard]
    model.fit_generator(
        generator=train_data_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        verbose=2,
        callbacks=callbacks_list)
Project: Deep-Learning-para-diagnostico-a-partir-de-imagenes-Biomedicas    Author: pacocp    | Project source | File source
def train_model_CV(slices_images,slices_labels,slice_number,f):
    '''
    Train the model using cross-validation, passing the slices manually.

    Parameters
    ----------
    slices_images: list of numpy.array
    slices_labels: list of numpy.array
    slice_number: index of the slice being evaluated
    f: file handle of the results file

    Output
    ----------
    Writes the mean test-set accuracy to the results file
    '''

    #images,labels,list_of_images = reorderRandomly(images,labels,list_of_images)
    '''
    for i in range(len(labels)):
        if labels[i] == "AD":
            labels[i] = 0
        else:
            labels[i] = 1

    slices_images = [images[i::5] for i in range(5)]
    slices_list_of_images = [list_of_images[i::5] for i in range(5)]
    slices_labels = [labels[i::5] for i in range(5)]

    print(slices_list_of_images)
    '''
    values_acc = []
    for i in range(5):
        model = create_model()
        X_test = slices_images[i]
        Y_test = slices_labels[i]
        X_train = [item
                    for s in slices_images if s is not X_test
                    for item in s]
        Y_train = [item
                    for s in slices_labels if s is not Y_test
                    for item in s]

        X_train = np.array(X_train)
        Y_train = np.array(Y_train)
        X_test = np.array(X_test)
        Y_test = np.array(Y_test)
        from keras.utils.np_utils import to_categorical
        Y_train = to_categorical(Y_train)
        Y_test = to_categorical(Y_test)
        history = model.fit(X_train,Y_train,epochs=70,batch_size=5,verbose=0)
        test_loss = model.evaluate(X_test,Y_test)
        print("Loss and accuracy in the test set: Loss %g, Accuracy %g"%(test_loss[0],test_loss[1]))
        values_acc.append(test_loss[1])

    mean = calculate_mean(values_acc)
    f.write(("The mean of all the test values for the slice %g is: %g \n"%(slice_number,mean)))
Project: Deep-Learning-para-diagnostico-a-partir-de-imagenes-Biomedicas    Author: pacocp    | Project source | File source
def train_model_CV_generator(images,labels,model,train_datagen):
    '''
    Train the model using cross-validation with a Keras data generator.

    Parameters
    ----------
    images: list of numpy.array
    labels: list of numpy.array
    model: Keras model (re-created inside the loop via create_model())
    train_datagen: Keras ImageDataGenerator (overwritten inside the function)
    '''
    train_datagen = ImageDataGenerator(horizontal_flip=True)
    images,labels = reorderRandomly(images,labels)

    for i in range(len(labels)):
        if labels[i] == "AD":
            labels[i] = 0
        else:
            labels[i] = 1

    slices_images = [images[i::5] for i in range(5)]
    slices_labels = [labels[i::5] for i in range(5)]

    models = {}
    histories = {}
    values_acc = []
    for i in range(5):
        model = create_model()
        X_test = slices_images[i]
        Y_test = slices_labels[i]
        X_train = [item
                    for s in slices_images if s is not X_test
                    for item in s]
        Y_train = [item
                    for s in slices_labels if s is not Y_test
                    for item in s]

        X_train = np.array(X_train)
        Y_train = np.array(Y_train)
        X_test = np.array(X_test)
        Y_test = np.array(Y_test)
        from keras.utils.np_utils import to_categorical
        Y_train = to_categorical(Y_train)
        Y_test = to_categorical(Y_test)
        history = model.fit_generator(train_datagen.flow(X_train,Y_train,batch_size = 5),
        epochs=70,steps_per_epoch= len(X_train)//5)
        models['model'+str(i)] = model
        test_loss = model.evaluate(X_test,Y_test)
        print("Loss and accuracy in the test set: Loss %g, Accuracy %g"%(test_loss[0],test_loss[1]))
        histories['test_acc'+str(i)] = test_loss
        values_acc.append(test_loss[1])

    mean = calculate_mean(values_acc)
    print("The mean of all the test values is: %g"%mean)
Project: Deep-Learning-para-diagnostico-a-partir-de-imagenes-Biomedicas    Author: pacocp    | Project source | File source
def train_LOO(images,labels):
    '''
    Train the model using leave-one-out (LOO) validation on all the images.

    Parameters
    ----------
    images: list of numpy.array
    labels: list of numpy.array

    Output
    ----------
    Prints the mean accuracy of the LOO validation
    '''
    values_acc = []
    for i in range(len(labels)):
        if labels[i] == "AD":
            labels[i] = 0
        else:
            labels[i] = 1
    print("The lenght of images is "+str(len(images)))
    for i in range(len(images)):
        print("We are in the fold "+str(i))
        model = create_model()
        X_test = []
        Y_test = []
        X_test.append(images[i])
        Y_test.append(labels[i])
        X_train = []
        Y_train = []
        for j in range(len(images)):
            if j != i:
                X_train.append(images[j])
                Y_train.append(labels[j])


        X_train = np.array(X_train)
        Y_train = np.array(Y_train)
        X_test = np.array(X_test)
        Y_test = np.array(Y_test)
        from keras.utils.np_utils import to_categorical
        Y_train = to_categorical(Y_train,2)
        Y_test = to_categorical(Y_test,2)


        history = model.fit(X_train,Y_train,epochs=70,batch_size=10,verbose=0)
        test_loss = model.evaluate(X_test,Y_test)
        print("Loss and accuracy in the test set: Loss %g, Accuracy %g"%(test_loss[0],test_loss[1]))
        values_acc.append(test_loss[1])

    mean = calculate_mean(values_acc)
    print("The mean of all the test values is: %g"%mean)
Project: Deep-Learning-para-diagnostico-a-partir-de-imagenes-Biomedicas    Author: pacocp    | Project source | File source
def train_LOO_pacient(images,labels,names):
    '''
    Train the model using leave-one-out (LOO) validation grouped by patient.

    Parameters
    ----------
    images: list of numpy.array. Images of the patients
    labels: list of numpy.array. Labels of the different images
    names: list of numpy.array. Names of the patients

    Output
    ----------
    Prints the mean accuracy of the LOO validation
    '''
    print("Training LOO with pacient name")
    values_acc = []
    already_tested = []
    for i in range(len(labels)):
        if labels[i] == "AD":
            labels[i] = 0
        else:
            labels[i] = 1
    print("The lenght of images is "+str(len(images)))
    for i in range(len(images)):
        model = create_model()
        X_test = []
        Y_test = []
        # If we haven't tested this patient already,
        # add their image to the test set along with
        # all other images that share the same name
        if(not(names[i] in already_tested)):
            already_tested.append(names[i])
            X_test.append(images[i])
            Y_test.append(labels[i])
            for j in range(len(images)):
                if j!=i and names[j] == names[i]:
                    X_test.append(images[j])
                    Y_test.append(labels[j])
            X_train = []
            Y_train = []
            for j in range(len(images)):
                if names[j] != names[i]:
                    X_train.append(images[j])
                    Y_train.append(labels[j])

            X_train = np.array(X_train)
            Y_train = np.array(Y_train)
            X_test = np.array(X_test)
            Y_test = np.array(Y_test)
            from keras.utils.np_utils import to_categorical
            Y_train = to_categorical(Y_train,2)
            Y_test = to_categorical(Y_test,2)
            print("The len of the training set is: "+str(len(X_train)))
            print("The len of the test set is: "+str(len(X_test)))

            history = model.fit(X_train,Y_train,epochs=70,batch_size=10)
            test_loss = model.evaluate(X_test,Y_test)
            print("Loss and accuracy in the test set: Loss %g, Accuracy %g"%(test_loss[0],test_loss[1]))
            values_acc.append(test_loss[1])

    mean = calculate_mean(values_acc)
    print("The mean of all the test values is: %g"%mean)