Python keras.utils.data_utils module: get_file() code examples

The following 38 code examples, extracted from open-source Python projects, illustrate how to use keras.utils.data_utils.get_file().
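
For reference, get_file() downloads a file from origin into a local cache (~/.keras/<cache_subdir>/ by default), reuses the cached copy on later calls, can verify an md5_hash/file_hash, and can unpack archives via untar/extract. Below is a minimal sketch of the two call patterns that recur throughout the examples; the URL and hash are placeholders, not real endpoints.

from keras.utils.data_utils import get_file

# Plain download: returns the local path of the cached file.
weights_path = get_file('example_weights.h5',
                        origin='https://example.com/models/example_weights.h5',
                        cache_subdir='models',  # cached under ~/.keras/models/
                        md5_hash='0123456789abcdef0123456789abcdef')  # optional integrity check

# Archive download: untar=True unpacks a .tar.gz and returns the
# extracted directory rather than the archive itself.
data_dir = get_file('example-data',
                    origin='https://example.com/example-data.tar.gz',
                    untar=True)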

Project: product-color-classifier | Author: two-tap
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
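
A hypothetical usage of the snippet above (the model and preprocessed batch are assumptions, not part of the original): given a classifier whose output is the (samples, 1000) ImageNet probability matrix, the decoded tuples can be printed as follows.

preds = model.predict(x)  # x: preprocessed image batch; model: e.g. a VGG16-style classifier
for class_id, class_name, score in decode_predictions(preds, top=3)[0]:
    print('%s (%s): %.4f' % (class_name, class_id, score))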
Project: video_labelling_using_youtube8m | Author: LittleWat
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: deep-learning-models | Author: fchollet
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: keras-contrib | Author: farizrahman4u
def load_data(path='conll2000.zip', min_freq=2):
    path = get_file(path, origin='https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/conll2000.zip')
    print(path)
    archive = ZipFile(path, 'r')
    train = _parse_data(archive.open('conll2000/train.txt'))
    test = _parse_data(archive.open('conll2000/test.txt'))
    archive.close()

    word_counts = Counter(row[0].lower() for sample in train for row in sample)
    vocab = ['<pad>', '<unk>'] + [w for w, f in iter(word_counts.items()) if f >= min_freq]
    pos_tags = sorted(list(set(row[1] for sample in train + test for row in sample)))  # in alphabetic order
    chunk_tags = sorted(list(set(row[2] for sample in train + test for row in sample)))  # in alphabetic order

    train = _process_data(train, vocab, pos_tags, chunk_tags)
    test = _process_data(test, vocab, pos_tags, chunk_tags)
    return train, test, (vocab, pos_tags, chunk_tags)
Project: vgg16-vgg19-resnet-inception-xception-example | Author: yong-ho
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: product-category-classifier | Author: two-tap
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: realLifePokedex | Author: agrimsingh
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: artificio | Author: ankonzoid
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: Papers2Code | Author: rainer85ah
def decode_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: Image-Captioning | Author: Shobhit20
def decode_imagenet_predictions(preds, top=5):
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []

    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        results.append(result)
    return results
Project: lsun-room | Author: leVirve
def fcn_Resnet50(input_shape=None, weight_decay=0.0002, batch_momentum=0.9, batch_shape=None, classes=40):

    img_input = Input(shape=input_shape)
    bn_axis = 3

    x = Conv2D(64, kernel_size=(7, 7), strides=(2, 2), padding='same', name='conv1', kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
    x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)

    x = conv_block(3, [128, 128, 512], stage=3, block='a')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='b')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='c')(x)
    x = identity_block(3, [128, 128, 512], stage=3, block='d')(x)

    x = conv_block(3, [256, 256, 1024], stage=4, block='a')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='b')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='c')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='d')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='e')(x)
    x = identity_block(3, [256, 256, 1024], stage=4, block='f')(x)

    x = conv_block(3, [512, 512, 2048], stage=5, block='a')(x)
    x = identity_block(3, [512, 512, 2048], stage=5, block='b')(x)
    x = identity_block(3, [512, 512, 2048], stage=5, block='c')(x)
    #classifying layer
    x = Conv2D(filters=40, kernel_size=(1, 1), strides=(1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', kernel_regularizer=l2(weight_decay))(x)

    x = Conv2DTranspose(filters=40, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(32, 32), padding='valid',use_bias=False, name='upscore2')(x)
    x = Cropping2D(cropping=((19, 36),(19, 29)), name='score')(x)

    model = Model(img_input, x)
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5', RES_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    model.load_weights(weights_path, by_name=True)

    return model
Project: keras-text | Author: raghakot
def get_embeddings_index(embedding_type='glove.42B.300d'):
    """Retrieves embeddings index from embedding name. Will automatically download and cache as needed.

    Args:
        embedding_type: The embedding type to load.

    Returns:
        The embeddings indexed by word.
    """

    embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
    if embeddings_index is not None:
        return embeddings_index

    data_obj = _EMBEDDING_TYPES.get(embedding_type)
    if data_obj is None:
        raise ValueError("Embedding name should be one of '{}'".format(_EMBEDDING_TYPES.keys()))

    cache_dir = os.path.expanduser(os.path.join('~', '.keras-text'))
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    file_path = get_file(embedding_type, origin=data_obj['url'], extract=True,
                         cache_dir=cache_dir, cache_subdir='embeddings')
    file_path = os.path.join(os.path.dirname(file_path), data_obj['file'])

    embeddings_index = _build_embeddings_index(file_path)
    _EMBEDDINGS_CACHE[embedding_type] = embeddings_index
    return embeddings_index
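
A possible call, assuming the _EMBEDDING_TYPES registry referenced above defines a 'glove.42B.300d' entry and that _build_embeddings_index returns a dict keyed by word:

embeddings = get_embeddings_index('glove.42B.300d')
vector = embeddings.get('keras')  # embedding vector, or None if the word is out of vocabulary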
Project: rnn-playlist-prediction | Author: burakkose
def load(mode=DatasetMode.small):
    base_path = get_file(DataConstants.dataset, origin=DataConstants.origin, untar=True)
    base_path = os.path.join(base_path, mode)

    train_path = os.path.join(base_path, DataConstants.train)
    test_path = os.path.join(base_path, DataConstants.test)
    song_path = os.path.join(base_path, DataConstants.song_hash)

    songs = dict(read_song_hash(song_path))
    train, test = read_dataset(train_path, test_path)

    return train, test, songs
Project: diluvian | Author: aschampion
def from_toml(filename):
        from keras.utils.data_utils import get_file

        volumes = {}
        with open(filename, 'rb') as fin:
            datasets = toml.load(fin).get('dataset', [])
            for dataset in datasets:
                hdf5_file = dataset['hdf5_file']
                if dataset.get('use_keras_cache', False):
                    hdf5_file = get_file(hdf5_file, dataset['download_url'], md5_hash=dataset.get('download_md5', None))
                image_dataset = dataset.get('image_dataset', None)
                label_dataset = dataset.get('label_dataset', None)
                mask_dataset = dataset.get('mask_dataset', None)
                mask_bounds = dataset.get('mask_bounds', None)
                resolution = dataset.get('resolution', None)
                hdf5_pathed_file = os.path.join(os.path.dirname(filename), hdf5_file)
                volume = HDF5Volume(hdf5_pathed_file,
                                    image_dataset,
                                    label_dataset,
                                    mask_dataset,
                                    mask_bounds=mask_bounds)
                # If the volume configuration specifies an explicit resolution,
                # override any provided in the HDF5 itself.
                if resolution:
                    logging.info('Overriding resolution for volume "%s"', dataset['name'])
                    volume.resolution = np.array(resolution)
                volumes[dataset['name']] = volume

        return volumes
Project: DeepLearning | Author: ChunML
def decode_predictions(preds):
    global CLASS_INDEX
    assert len(preds.shape) == 2 and preds.shape[1] == 1000
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    indices = np.argmax(preds, axis=-1)
    results = []
    for i in indices:
        results.append(CLASS_INDEX[str(i)])
    return results
Project: SGAITagger | Author: zhiweiuu
def run_conlleval(X_words_test, y_test, y_pred, index2word, index2chunk, pad_id=0):
    '''
    Runs the conlleval script for evaluation the predicted IOB-tags.
    '''
    url = 'http://www.cnts.ua.ac.be/conll2000/chunking/conlleval.txt'
    path = get_file('conlleval',
                    origin=url,
                    md5_hash='61b632189e5a05d5bd26a2e1ec0f4f9e')

    p = Popen(['perl', path], stdout=PIPE, stdin=PIPE, stderr=STDOUT)

    y_true = np.squeeze(y_test, axis=2)

    sequence_lengths = np.argmax(X_words_test == pad_id, axis=1)
    nb_samples = X_words_test.shape[0]
    conlleval_input = []
    for k in range(nb_samples):
        sent_len = sequence_lengths[k]
        words = list(map(lambda idx: index2word[idx], X_words_test[k][:sent_len]))
        true_tags = list(map(lambda idx: index2chunk[idx], y_true[k][:sent_len]))
        pred_tags = list(map(lambda idx: index2chunk[idx], y_pred[k][:sent_len]))
        sent = zip(words, true_tags, pred_tags)
        for row in sent:
            conlleval_input.append(' '.join(row))
        conlleval_input.append('')
    print()
    conlleval_stdout = p.communicate(input='\n'.join(conlleval_input).encode())[0]
    print(blue(conlleval_stdout.decode()))
    print()
Project: nuts-ml | Author: maet3608
def load_names():
    from keras.utils.data_utils import get_file
    dirname = 'cifar-10-batches-py'
    origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    path = get_file(dirname, origin=origin, untar=True)
    with open(osp.join(path, 'batches.meta'), 'rb') as f:
        return pickle.load(f)['label_names']
Project: dsde-deep-learning | Author: broadinstitute
def nietzsche():
    path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
    text = open(path).read().lower()
    return text
Project: DeepGold | Author: scottvallance
def decode_predictions(preds):
    global CLASS_INDEX
    assert len(preds.shape) == 2 and preds.shape[1] == 1000
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    indices = np.argmax(preds, axis=-1)
    results = []
    for i in indices:
        results.append(CLASS_INDEX[str(i)])
    return results
Project: EvadeML-Zoo | Author: mzweilin
def get_densenet_weights_path(dataset_name="CIFAR-10", include_top=True):
    assert dataset_name == "CIFAR-10"
    if include_top:
        weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels.h5',
                                TF_WEIGHTS_PATH,
                                cache_subdir='models')
    else:
        weights_path = get_file('densenet_40_12_tf_dim_ordering_tf_kernels_no_top.h5',
                                TF_WEIGHTS_PATH_NO_TOP,
                                cache_subdir='models')
    return weights_path
Project: Super-Resolution-using-Generative-Adversarial-Networks | Author: titu1994
def load_vgg_weight(self, model):
        # Loading VGG 16 weights
        if K.image_dim_ordering() == "th":
            weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP,
                                   cache_subdir='models')
        else:
            weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP,
                                   cache_subdir='models')
        f = h5py.File(weights, 'r')  # open the weight file read-only

        layer_names = [name for name in f.attrs['layer_names']]

        if self.vgg_layers is None:
            self.vgg_layers = [layer for layer in model.layers
                               if 'vgg_' in layer.name]

        for i, layer in enumerate(self.vgg_layers):
            g = f[layer_names[i]]
            weights = [g[name] for name in g.attrs['weight_names']]
            layer.set_weights(weights)

        # Freeze all VGG layers
        for layer in self.vgg_layers:
            layer.trainable = False

        return model
Project: unblackboxing_webinar | Author: deepsense-ai
def get_pred_text_label(pred_id):
    CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
    fpath = get_file('imagenet_class_index.json',
                 CLASS_INDEX_PATH,
                 cache_subdir='models')
    label_dict = json.load(open(fpath))
    return label_dict[str(pred_id)][1]
Project: pose-regression | Author: dluvizon
def main_thread():

    # Build the model and load the pre-trained weights on MPII
    model = posereg.build(input_shape, pa16j.num_joints, export_heatmaps=True)
    weights_path = get_file(weights_file, TF_WEIGHTS_PATH, md5_hash=md5_hash,
            cache_subdir=cache_subdir)
    model.load_weights(weights_path)

    queue_frames = queue.Queue(2)
    queue_poses = queue.Queue(2)
    proc = threading.Thread(target=thread_grab_frames,
            args=(queue_frames, queue_poses))
    proc.daemon = True
    proc.start()

    clock = pygame.time.Clock()

    show_fps_cnt = 0
    while True:
        x = queue_frames.get()
        pred = model.predict(x)
        pred.append(x) # Append the input frame
        queue_poses.put(pred)

        clock.tick()
        show_fps_cnt += 1
        if show_fps_cnt == 10:
            show_fps_cnt = 0
            print('fps: ' + str(clock.get_fps()))
Project: pix2pix-keras | Author: williamFalcon
def download_facades_bw(tmp_path, data_folder_path):
    # download to .tmp file
    downloaded_path = get_file(tmp_path + '/facades_bw.tar', origin=AWS_FACADES_PATH)

    # un-tar
    untar_file(downloaded_path, data_folder_path + '/facades_bw', remove_tar=False, flags='-xvf')

    # move data file
    subprocess.call(['rm', '-rf', tmp_path])
Project: deeputil | Author: Avkash
def CIFAR10(show_info=True):
        """
        This is a pre-built CIFAR10 model trained for 250 epochs with 78.00% accuracy
        :param show_info:
        :return: model as Keras.Model
        """
        # Getting Config first
        config_path = get_file('cifar10_config_2000.json',
                                CONFIG_PATH,
                                cache_subdir='models')

        # Getting weights next
        weights_path = get_file('cifar10_weight_2000.h5',
                                WEIGHTS_PATH,
                                cache_subdir='models')

        config_found = False
        if os.path.isfile(config_path):
            config_found = True
        else:
            if show_info is True:
                print("Error: Unable to get the CIFAR10 model configuration on disk..")

        weight_found = False
        if os.path.isfile(weights_path):
            weight_found = True
        else:
            if show_info is True:
                print("Error: Unable to get the CIFAR10 model weights on disk..")

        if config_found is False and weight_found is False:
            if show_info is True:
                print("Error: Unable to get the CIFAR10 model..")

        return modelassist.ImportExport.import_keras_model_config_and_weight_and_compile(config_path, weights_path, show_info)
Project: deeputil | Author: Avkash
def MNIST2000(show_info=True):
        """
        This is a pre-built MNIST model trained for 2000 epochs with 99.38% accuracy
        :param show_info:
        :return: model as Keras.Model
        """
        # Getting Config first
        config_path = get_file('mnist_config_100.json',
                                CONFIG_PATH,
                                cache_subdir='models')

        # Getting weights next
        weights_path = get_file('mnist_weight_100.h5',
                                WEIGHTS_PATH,
                                cache_subdir='models')

        config_found = False
        if os.path.isfile(config_path):
            config_found = True
        else:
            if show_info is True:
                print("Error: Unable to get the MNIST model configuration on disk..")

        weight_found = False
        if os.path.isfile(weights_path):
            weight_found = True
        else:
            if show_info is True:
                print("Error: Unable to get the MNIST model weights on disk..")

        if config_found is False and weight_found is False:
            if show_info is True:
                print("Error: Unable to get the MNIST model..")

        return modelassist.ImportExport.import_keras_model_config_and_weight_and_compile(config_path, weights_path, show_info)
Project: Socrates | Author: abhishekraok
def download_from_cloud(model_file_name, json_url, h5_url):
        print('Downloading from cloud')
        json_file_name, h5_file_name = SequenceModel.get_full_file_names(model_file_name)
        downloaded_json = get_file(os.path.normpath(json_file_name), origin=json_url)
        if downloaded_json != json_file_name:
            shutil.copy(downloaded_json, json_file_name)
        downloaded_h5 = get_file(os.path.normpath(h5_file_name), origin=h5_url)
        if downloaded_h5 != h5_file_name:
            shutil.copy(downloaded_h5, h5_file_name)
Project: keras-vggface | Author: rcmalli
def decode_predictions(preds, top=5):
    LABELS = None
    if len(preds.shape) == 2:
        if preds.shape[1] == 2622:
            fpath = get_file('rcmalli_vggface_labels_v1.npy',
                             V1_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            LABELS = np.load(fpath)
        elif preds.shape[1] == 8631:
            fpath = get_file('rcmalli_vggface_labels_v2.npy',
                             V2_LABELS_PATH,
                             cache_subdir=VGGFACE_DIR)
            LABELS = np.load(fpath)
        else:
            raise ValueError('`decode_predictions` expects '
                             'a batch of predictions '
                             '(i.e. a 2D array of shape (samples, 2622)) for V1 or '
                             '(samples, 8631) for V2. '
                             'Found array with shape: ' + str(preds.shape))
    else:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 2622)) for V1 or '
                         '(samples, 8631) for V2. '
                         'Found array with shape: ' + str(preds.shape))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [[str(LABELS[i].encode('utf8')), pred[i]] for i in top_indices]
        result.sort(key=lambda x: x[1], reverse=True)
        results.append(result)
    return results
Project: ActionRecognition | Author: woodfrog
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    # Arguments
        preds: Numpy tensor encoding a batch of predictions.
        top: integer, how many top-guesses to return.

    # Returns
        A list of lists of top class prediction tuples
        `(class_name, class_description, score)`.
        One list of tuples per sample in batch input.

    # Raises
        ValueError: in case of invalid shape of the `pred` array
            (must be 2D).
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results
Project: ActionRecognition | Author: woodfrog
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    # Arguments
        preds: Numpy tensor encoding a batch of predictions.
        top: integer, how many top-guesses to return.

    # Returns
        A list of lists of top class prediction tuples
        `(class_name, class_description, score)`.
        One list of tuples per sample in batch input.

    # Raises
        ValueError: in case of invalid shape of the `pred` array
            (must be 2D).
    """
    global CLASS_INDEX
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_predictions` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    if CLASS_INDEX is None:
        fpath = get_file('imagenet_class_index.json',
                         CLASS_INDEX_PATH,
                         cache_subdir='models')
        CLASS_INDEX = json.load(open(fpath))
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results
Project: Keras-FCN | Author: aurora95
def get_weights_path_vgg16():
    TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models')
    return weights_path
Project: Keras-FCN | Author: aurora95
def get_weights_path_resnet():
    TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5', TF_WEIGHTS_PATH, cache_subdir='models')
    return weights_path
Project: keras-fcn | Author: JihongJu
def __init__(self, inputs, blocks, weights=None,
                 trainable=True, name='encoder'):
        inverse_pyramid = []

        # convolutional block
        conv_blocks = blocks[:-1]
        for i, block in enumerate(conv_blocks):
            if i == 0:
                x = block(inputs)
                inverse_pyramid.append(x)
            elif i < len(conv_blocks) - 1:
                x = block(x)
                inverse_pyramid.append(x)
            else:
                x = block(x)

        # fully convolutional block
        fc_block = blocks[-1]
        y = fc_block(x)
        inverse_pyramid.append(y)

        outputs = list(reversed(inverse_pyramid))

        super(Encoder, self).__init__(
            inputs=inputs, outputs=outputs)

        # load pre-trained weights
        layer_names = []  # ensure this exists even when no weights are loaded; it is used below
        if weights is not None:
            weights_path = get_file(
                '{}_weights_tf_dim_ordering_tf_kernels.h5'.format(name),
                weights,
                cache_subdir='models')
            layer_names = load_weights(self, weights_path)
            if K.image_data_format() == 'channels_first':
                layer_utils.convert_all_kernels_in_model(self)

        # Freezing basenet weights
        if trainable is False:
            for layer in self.layers:
                if layer.name in layer_names:
                    layer.trainable = False
Project: lsun-room | Author: leVirve
def fcn_vggbase(input_shape=(None,None,3)):

    img_input = Input(shape=input_shape)
    x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same',name='block5_pool')(x)

    x = Conv2D(filters=4096, kernel_size=(7, 7), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc6_lsun')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=4096, kernel_size=(1, 1), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc7_lsun')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=5, kernel_size=(1, 1), strides=(1,1), kernel_initializer='he_normal', padding='valid', name='lsun_score')(x)

    x = Conv2DTranspose(filters=5, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(32, 32), padding='valid',use_bias=False, name='lsun_upscore2')(x)
    output = _crop(img_input,offset=(32,32), name='score')(x)

    model = Model(img_input, output)
    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    model.load_weights(weights_path, by_name=True)

    return model
Project: lsun-room | Author: leVirve
def fcn16s_vggbase(input_shape=None, nb_class=None):
    img_input = Input(shape=input_shape)
    x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)
    pool4 = x

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same',name='block5_pool')(x)

    x = Conv2D(filters=4096, kernel_size=(7, 7), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=4096, kernel_size=(1, 1), kernel_regularizer=l2(0.00005), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=nb_class, kernel_size=(1, 1), strides=(1,1), kernel_initializer='he_normal', padding='valid', name='p5score')(x)
    x = Conv2DTranspose(filters=nb_class, kernel_size=(4,4), strides=(2,2), kernel_initializer='he_normal', padding='valid', name='p5upscore')(x)

    pool4 = Conv2D(filters=nb_class, kernel_size=(1,1), kernel_initializer='he_normal', padding='valid', name='pool4_score')(pool4)
    pool4_score = _crop(x, offset=(5,5), name='pool4_score2')(pool4)
    m = add([pool4_score, x])  # keras.layers.add replaces Keras 1's merge(..., mode='sum')
    upscore = Conv2DTranspose(filters=nb_class, kernel_size=(32,32), strides=(16,16), padding='valid', name='merged_score')(m)
    score = _crop(img_input, offset=(27,27), name='output_score')(upscore)

    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    mdl = Model(img_input, score, name='fcn16s')
    mdl.load_weights(weights_path, by_name=True)

    return mdl
Project: lsun-room | Author: leVirve
def dilated_FCN_addmodule(input_shape=None):
    img_input = Input(shape=input_shape)
    x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same',name='block5_pool')(x)

    x = Conv2D(filters=4096, kernel_initializer='he_normal', kernel_size=(7, 7), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=4096, kernel_initializer='he_normal', kernel_size=(1, 1), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.85)(x)
    x = Conv2D(filters=40,kernel_size=(1, 1), strides=(1,1), kernel_initializer='he_normal', padding='valid', name='score_fr')(x)
    #x = Cropping2D(cropping=((19, 36),(19, 29)), name='score')(x)
    x = ZeroPadding2D(padding=(33,33))(x)
    x = Conv2D(2*40, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv1')(x)
    x = Conv2D(2*40, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv2')(x)
    x = Conv2D(4*40, (3,3), kernel_initializer='he_normal',dilation_rate=(2,2), activation='relu', name='dl_conv3')(x)
    x = Conv2D(8*40, (3,3), kernel_initializer='he_normal',dilation_rate=(4,4), activation='relu', name='dl_conv4')(x)
    x = Conv2D(16*40, (3,3), kernel_initializer='he_normal',dilation_rate=(8,8), activation='relu', name='dl_conv5')(x)
    x = Conv2D(32*40, (3,3), kernel_initializer='he_normal',dilation_rate=(16,16), activation='relu', name='dl_conv6')(x)
    x = Conv2D(32*40, (1,1), kernel_initializer='he_normal',name='dl_conv7')(x)
    x = Conv2D(1*40, (1,1), kernel_initializer='he_normal',name='dl_final')(x)
    x = Conv2DTranspose(filters=40, kernel_initializer='he_normal', kernel_size=(64, 64), strides=(32, 32), padding='valid',use_bias=False, name='upscore2')(x)
    x = CroppingLike2D(img_input, offset='centered', name='score')(x)

    mdl = Model(img_input, x, name='dilatedmoduleFCN')
    #weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    mdl.load_weights('logs/model_June13_sgd_60kitr.h5', by_name=True)
    return mdl
Project: lsun-room | Author: leVirve
def dilated_FCN_frontended(input_shape=None, weight_decay=None, nb_classes=40):

    img_input = Input(shape=input_shape)

    #x = ZeroPadding2D(padding=(100, 100), name='pad1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)

    x = Conv2D(512, (3,3), dilation_rate=(2,2), activation='relu', name='block5_conv1')(x)
    x = Conv2D(512, (3,3), dilation_rate=(2,2), activation='relu', name='block5_conv2')(x)
    x = Conv2D(512, (3,3), dilation_rate=(2,2), activation='relu', name='block5_conv3')(x)

    x = Conv2D(4096, (3,3), kernel_initializer='he_normal', dilation_rate=(4,4), activation='relu', name='fc6')(x)
    x = Dropout(0.5, name='drop6')(x)
    x = Conv2D(4096, (1,1), kernel_initializer='he_normal', activation='relu', name='fc7')(x)
    x = Dropout(0.5, name='drop7')(x)
    x = Conv2D(nb_classes, (1,1), kernel_initializer='he_normal', activation='relu', name='fc_final')(x)


    #x = Conv2DTranspose(nb_classes, kernel_size=(64,64), strides=(32,32), padding='valid', name='upscore2')(x)    
    x = ZeroPadding2D(padding=(33,33))(x)
    x = Conv2D(2*nb_classes, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv1')(x)
    x = Conv2D(2*nb_classes, (3,3), kernel_initializer='he_normal',activation='relu', name='dl_conv2')(x)
    x = Conv2D(4*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(2,2), activation='relu', name='dl_conv3')(x)
    x = Conv2D(8*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(4,4), activation='relu', name='dl_conv4')(x)
    x = Conv2D(16*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(8,8), activation='relu', name='dl_conv5')(x)
    x = Conv2D(32*nb_classes, (3,3), kernel_initializer='he_normal',dilation_rate=(16,16), activation='relu', name='dl_conv6')(x)
    x = Conv2D(32*nb_classes, (1,1), kernel_initializer='he_normal',name='dl_conv7')(x)
    x = Conv2D(1*nb_classes, (1,1), kernel_initializer='he_normal',name='dl_final')(x)
    x = Conv2DTranspose(nb_classes, kernel_initializer='he_normal', kernel_size=(64,64), strides=(8,8), padding='valid', name='upscore2')(x)
    x = CroppingLike2D(img_input, offset='centered', name='score')(x)
    #x = Cropping2D(cropping=((19,36), (19,29)), name='score')(x)


    mdl = Model(inputs=img_input, outputs=x, name='dilated_fcn')
    weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', VGG_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
    mdl.load_weights(weights_path, by_name=True)
    return mdl
Project: keras-inceptionV4 | Author: kentsommer
def inception_v4(num_classes, dropout_keep_prob, weights, include_top):
    '''
    Creates the inception v4 network

    Args:
        num_classes: number of classes
        dropout_keep_prob: float, the fraction to keep before final layer.

    Returns: 
        logits: the logits outputs of the model.
    '''

    # Input Shape is 299 x 299 x 3 (tf) or 3 x 299 x 299 (th)
    if K.image_data_format() == 'channels_first':
        inputs = Input((3, 299, 299))
    else:
        inputs = Input((299, 299, 3))

    # Make inception base
    x = inception_v4_base(inputs)


    # Final pooling and prediction
    if include_top:
        # 1 x 1 x 1536
        x = AveragePooling2D((8,8), padding='valid')(x)
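        # NOTE: Keras's Dropout(rate) drops the given fraction of units, so
        # despite its name, dropout_keep_prob is effectively applied here as
        # the drop probability rather than the keep probability.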
        x = Dropout(dropout_keep_prob)(x)
        x = Flatten()(x)
        # 1536
        x = Dense(units=num_classes, activation='softmax')(x)

    model = Model(inputs, x, name='inception_v4')

    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        if include_top:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='9fe79d77f793fe874470d84ca6ba4a3b')
        else:
            weights_path = get_file(
                'inception-v4_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='9296b46b5971573064d12e4669110969')
        model.load_weights(weights_path, by_name=True)
    return model
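
A hypothetical instantiation (the argument values are illustrative; TF-slim-style ImageNet checkpoints for Inception v4 typically use 1001 output classes, including a background class, so num_classes should match the weights being loaded):

model = inception_v4(num_classes=1001, dropout_keep_prob=0.8,
                     weights='imagenet', include_top=True)  # see the Dropout NOTE above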