Python keras.initializations module, glorot_uniform() example source code

We have extracted the following 10 code examples from open-source Python projects to illustrate how to use keras.initializations.glorot_uniform().
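For orientation, here is a minimal usage sketch, assuming the Keras 1.x initializations API, where glorot_uniform(shape, name=None) returns a backend variable whose values are drawn from U(-s, s) with s = sqrt(6 / (fan_in + fan_out)); the shape (128, 64) is an illustrative choice, not taken from any of the projects below:

import keras.backend as K
from keras.initializations import glorot_uniform

# Create a Glorot-uniform-initialized backend variable...
weights = glorot_uniform((128, 64))  # values drawn from U(-s, s)
values = K.eval(weights)             # ...and materialize it as a NumPy array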

Project: Kutils    Author: ishank26    | Project source | File source
def prep_embed(self, full_i2w_dict, ind2word, w2v_dim):
        '''
        Prepare an embedding vector for each word in full_i2w_dict.

        Words present in the word2vec vocabulary get their respective
        word vectors; OOV (out-of-vocabulary) words, i.e. words not in
        word2vec, get a random Glorot-uniform weight (rand_weight).
        '''
        embed_weight = np.zeros((len(full_i2w_dict), w2v_dim))
        embed_dict = {}
        for k, v in full_i2w_dict.items():
            if k in ind2word:
                model_weight = np.array(self.model[v])
                embed_weight[k] = model_weight
                embed_dict[k] = model_weight
            else:
                rand_weight = np.array(glorot_uniform((w2v_dim,)).eval())
                embed_weight[k] = rand_weight
                embed_dict[k] = rand_weight
        return embed_weight, embed_dict
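A plausible follow-up, not taken from the project: the returned embed_weight matrix can seed a Keras 1.x Embedding layer. A dummy matrix stands in here so the sketch is self-contained:

import numpy as np
from keras.layers import Embedding

# Hypothetical stand-in for the embed_weight returned by prep_embed.
embed_weight = np.zeros((5000, 300))  # (vocab_size, w2v_dim)
embedding_layer = Embedding(input_dim=embed_weight.shape[0],
                            output_dim=embed_weight.shape[1],
                            weights=[embed_weight])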
Project: Asynchronous-RL-agent    Author: Fritz449    | Project source | File source
def create_conv_model(self):
        # This is where the neural network model is initialized
        init = 'glorot_uniform'
        self.state_in = Input(self.state_dim)
        self.l1 = Convolution2D(32, 8, 8, activation='elu', init=init, subsample=(4, 4), border_mode='same')(
            self.state_in)
        self.l2 = Convolution2D(64, 4, 4, activation='elu', init=init, subsample=(2, 2), border_mode='same')(
            self.l1)
        # self.l3 = Convolution2D(64, 3, 3, activation='relu', init=init, subsample=(1, 1), border_mode='same')(
        #     self.l2)
        self.l3 = self.l2
        self.h = Flatten()(self.l3)
        self.hidden = Dense(256, init=init, activation='elu')(self.h)
        self.value = Dense(1, init=init)(self.hidden)
        self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)
        self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                             Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                    axis=[1], keepdims=True), (1, self.action_dim)))
        self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
        self.model = Model(self.state_in, output=[self.policy, self.value])
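The q_values expression above computes, per action a, V + beta * (log pi(a) - sum over a' of pi(a') log pi(a')), i.e. the state value plus an entropy-scaled deviation of each action's log-probability from the policy's negative entropy. A small NumPy illustration with hypothetical numbers (not from the project):

import numpy as np

policy = np.array([[0.7, 0.2, 0.1]])  # (batch, n_actions)
value = np.array([[1.5]])             # (batch, 1)
beta = 0.01                           # entropy coefficient
log_pi = np.log(policy + 1e-18)
neg_entropy = np.sum(policy * log_pi, axis=1, keepdims=True)
q_values = value + beta * (log_pi - neg_entropy)  # broadcasts to (1, 3)
print(q_values)  # one pseudo Q-value per action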
Project: Asynchronous-RL-agent    Author: Fritz449    | Project source | File source
def create_fc_model(self):
        # This is where the neural network model is initialized
        init = 'glorot_uniform'
        self.state_in = Input(self.state_dim)
        self.hidden = Dense(256, init=init, activation='elu')(self.state_in)
        self.value = Dense(1)(self.hidden)
        self.policy = Dense(self.action_dim, init=init, activation='softmax')(self.hidden)

        self.q_values = self.entropy_coef * (Theano.log(self.policy + 1e-18) -
                                             Theano.tile(Theano.sum(Theano.log(self.policy + 1e-18) * self.policy,
                                                                    axis=[1], keepdims=True), (1, self.action_dim)))
        self.q_values = self.q_values + Theano.tile(self.value, (1, self.action_dim))
        self.model = Model(self.state_in, output=[self.policy, self.value])
Project: keras    Author: GeekLiB    | Project source | File source
def test_glorot_uniform(tensor_shape):
    scale = np.sqrt(6. / (SHAPE[0] + SHAPE[1]))
    _runner(initializations.glorot_uniform, tensor_shape, target_mean=0.,
            target_max=scale, target_min=-scale)
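For a concrete sense of the bound being tested, assume a hypothetical SHAPE of (100, 100) (the real SHAPE is defined elsewhere in the test module); the Glorot-uniform limit then works out as:

import numpy as np

fan_in, fan_out = 100, 100                 # illustrative stand-in for SHAPE
scale = np.sqrt(6. / (fan_in + fan_out))   # sqrt(6/200)
print(scale)                               # ~0.1732; samples fall in (-scale, scale)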
Project: EUNN-theano    Author: iguanaus    | Project source | File source
def unitary_ASB2016_init(shape, name=None):
    assert shape[0] == shape[1]
    N = shape[1]

    theta = initializations.uniform((3, N), scale=np.pi, name='{}_theta'.format(name))
    reflection = initializations.glorot_uniform((2, 2 * N), name='{}_reflection'.format(name))
    idxperm = np.random.permutation(N)
    idxpermaug = np.concatenate((idxperm, N + idxperm))

    Iaug = augLeft(np.concatenate((np.eye(N), np.zeros((N, N))), axis=0), module=np).astype(np.float32)
    Uaug = times_unitary_ASB2016(Iaug, N, [theta, reflection, idxpermaug])

    return Uaug, theta, reflection, idxpermaug
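Both initializers used above return backend variables rather than NumPy arrays. A small sketch, assuming the Keras 1.x API and a hypothetical N, of how their values can be materialized; theta lands in (-pi, pi) because uniform(shape, scale=s) samples from U(-s, s):

import numpy as np
import keras.backend as K
from keras import initializations

N = 4
theta = initializations.uniform((3, N), scale=np.pi)
reflection = initializations.glorot_uniform((2, 2 * N))
theta_np = K.eval(theta)
print(theta_np.min(), theta_np.max())  # both within (-pi, pi)
print(K.eval(reflection).shape)        # (2, 8)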
Project: keras-customized    Author: ambrite    | Project source | File source
def test_glorot_uniform(tensor_shape):
    scale = np.sqrt(6. / (SHAPE[0] + SHAPE[1]))
    _runner(initializations.glorot_uniform, tensor_shape, target_mean=0.,
            target_max=scale, target_min=-scale)
Project: keras    Author: NVIDIA    | Project source | File source
def test_glorot_uniform(tensor_shape):
    scale = np.sqrt(6. / (SHAPE[0] + SHAPE[1]))
    _runner(initializations.glorot_uniform, tensor_shape, target_mean=0.,
            target_max=scale, target_min=-scale)
Project: urnn    Author: stwisdom    | Project source | File source
def unitary_ASB2016_init(shape, name=None):
    assert shape[0] == shape[1]
    N = shape[1]

    theta = initializations.uniform((3, N), scale=np.pi, name='{}_theta'.format(name))
    reflection = initializations.glorot_uniform((2, 2 * N), name='{}_reflection'.format(name))
    idxperm = np.random.permutation(N)
    idxpermaug = np.concatenate((idxperm, N + idxperm))

    Iaug = augLeft(np.concatenate((np.eye(N), np.zeros((N, N))), axis=0), module=np).astype(np.float32)
    Uaug = times_unitary_ASB2016(Iaug, N, [theta, reflection, idxpermaug])

    return Uaug, theta, reflection, idxpermaug
Project: ikelos    Author: braingineer    | Project source | File source
def make_hash_embeddings(igor, vocab):
    assert os.path.exists(igor.target_glove), "You need to specify a real file"
    fileiter = open(igor.target_glove).readlines()

    hash_vocab = Vocabulary()
    hash_vocab.use_mask = True
    hash_vocab.add(hash_vocab.mask_symbol)
    hash_vocab.add(hash_vocab.unk_symbol)
    word2hash = {}
    for word, v_id in vocab.items():
        ids = hash_vocab.add_many(hash_word(word))
        word2hash[v_id] = ids

    embeddings = np.zeros((len(hash_vocab), igor.embedding_size))
    remaining_vocab = set(vocab.keys())
    remaining_hashes = set(hash_vocab.values())
    for line in tqdm(fileiter):
        line = line.replace("\n", "").split(" ")
        word, nums = line[0], [float(x.strip()) for x in line[1:]]
        word_hash = hash_word(word)
        if word in remaining_vocab:
            hash_ids = word2hash[vocab[word]]
            remaining_vocab.remove(word)
            remaining_hashes.difference_update(hash_ids)
            embeddings[hash_ids] += np.array(nums) / len(hash_ids)
    print("{} words were not seen.  {} hashes were not seen".format(len(remaining_vocab),
                                                                    len(remaining_hashes)))
    for hash_id in remaining_hashes:
        embeddings[hash_id] = np.asarray(glorot_uniform((igor.embedding_size,)).eval())

    glove_name = igor.target_glove[igor.target_glove.find("glove"):].replace("/", "")

    hash_vocab.save('hash_embedding_{}.vocab'.format(glove_name))
    with open(path.join(igor.save_dir, "hash_embedding_{}.npy".format(glove_name)), "wb") as fp:
        np.save(fp, embeddings)
    with open(path.join(igor.save_dir, "word2hash.json"), "w") as fp:
        json.dump(word2hash, fp)
Project: ikelos    Author: braingineer    | Project source | File source
def from_vocab(igor, vocab):
    print("using vocab and glove file to generate embedding matrix")
    remaining_vocab = set(vocab.keys())
    embeddings = np.zeros((len(vocab), igor.embedding_size))
    print("{} words to convert".format(len(remaining_vocab)))


    if igor.save_dir[-1] != "/":
        igor.save_dir += "/"
    if not path.exists(igor.save_dir):
        makedirs(igor.save_dir)

    if igor.from_url:
        assert hasattr(glove_urls, igor.target_glove), "You need to specify one of the glove variables"
        url = urlopen(getattr(glove_urls, igor.target_glove))
        fileiter = ZipFile(StringIO(url.read())).open(file).readlines()  # NB: 'file' is presumably bound elsewhere in the original module
    else:
        assert os.path.exists(igor.target_glove), "You need to specify a real file"
        fileiter = open(igor.target_glove).readlines()

    count = 0
    for line in tqdm(fileiter):
        line = line.replace("\n", "").split(" ")
        try:
            word, nums = line[0], [float(x.strip()) for x in line[1:]]
            if word in remaining_vocab:
                embeddings[vocab[word]] = np.array(nums)
                remaining_vocab.remove(word)
        except Exception as e:
            print("{} broke. exception: {}. line: {}.".format(word, e, line))
        count += 1

    print("{} words were not in glove; saving to oov.txt".format(len(remaining_vocab)))
    with open(path.join(igor.save_dir, "oov.txt"), "w") as fp:
        fp.write("\n".join(remaining_vocab))

    for word in tqdm(remaining_vocab):
        embeddings[vocab[word]] = np.asarray(glorot_uniform((igor.embedding_size,)).eval())

    vocab.save('embedding.vocab')
    with open(path.join(igor.save_dir, "embedding.npy"), "wb") as fp:
        np.save(fp, embeddings)
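
A hypothetical follow-up, not part of the project, showing how the saved matrix might be loaded back later:

import numpy as np
from os import path

save_dir = "output/"  # stand-in for igor.save_dir
embeddings = np.load(path.join(save_dir, "embedding.npy"))
print(embeddings.shape)  # (len(vocab), igor.embedding_size)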