Python keras.backend module: get_value() code examples

We extracted the following code examples from open-source Python projects to illustrate how to use keras.backend.get_value().
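Before the project-by-project examples, here is a minimal, self-contained sketch of the API itself (variable names are illustrative; a TensorFlow or Theano backend is assumed). K.get_value() evaluates a backend variable and returns its contents as a NumPy array; its counterpart K.set_value() writes a NumPy array back into the variable. Nearly every snippet below is a variation on this read/write pair, most often applied to model.optimizer.lr.

import numpy as np
from keras import backend as K

v = K.variable(np.zeros((2, 3)))   # a backend variable
print(K.get_value(v))              # read it back as a NumPy array of zeros
K.set_value(v, np.ones((2, 3)))    # overwrite it in place
print(K.get_value(v).sum())        # -> 6.0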

Project: latplan    Author: guicho271828    | project source | file source
def _build(self,input_shape):
        x = Input(shape=input_shape)
        N = input_shape[0] // 2

        y = Sequential([
            flatten,
            *[Sequential([BN(),
                          Dense(self.parameters['layer'],activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(x)

        self.loss = bce
        self.net = Model(x, y)
        # self.callbacks.append(self.linear_schedule([0.2,0.5], 0.1))
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
        # self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
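The commented-out custom_log_functions line above points at a common use of K.get_value(): sampling the optimizer's learning rate once per epoch for logging. A minimal sketch of the same idea with a plain Keras callback follows (the helper name and the model are stand-ins, not part of latplan):

from keras.callbacks import LambdaCallback
from keras import backend as K

def make_lr_logger(model):
    # Read the lr variable at the end of each epoch and print it.
    return LambdaCallback(on_epoch_end=lambda epoch, logs: print(
        'epoch %d: lr=%g' % (epoch, K.get_value(model.optimizer.lr))))

# usage: model.fit(x, y, callbacks=[make_lr_logger(model)])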
Project: keras    Author: GeekLiB    | project source | file source
def test_LearningRateScheduler():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    # after five epochs the schedule 1 / (1 + x) gives lr = 1 / (1 + 4) = 0.2
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_batchrenorm_clipping_schedule():
    '''Test that the clipping schedule isn't fixed at r_max=1, d_max=0'''
    inp = Input(shape=(10,))
    bn = normalization.BatchRenormalization(t_delta=1.)
    out = bn(inp)
    model = Model(inp, out)
    model.compile('sgd', 'mse')

    x = np.random.normal(5, 10, size=(2, 10))
    y = np.random.normal(5, 10, size=(2, 10))

    r_max, d_max = K.get_value(bn.r_max), K.get_value(bn.d_max)
    assert r_max == 1
    assert d_max == 0

    for i in range(10):
        model.train_on_batch(x, y)

    r_max, d_max = K.get_value(bn.r_max), K.get_value(bn.d_max)
    assert_allclose([r_max, d_max], [3, 5], atol=1e-1)
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None, upper_bound=None, lower_bound=None):
    variable = init(shape)
    if not isinstance(variable, np.ndarray):
        output = K.get_value(variable)
    else:
        output = variable

    lim = 1e-2
    if target_std is not None:
        assert abs(output.std() - target_std) < lim
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < lim
    if target_max is not None:
        assert abs(output.max() - target_max) < lim
    if target_min is not None:
        assert abs(output.min() - target_min) < lim
    if upper_bound is not None:
        assert output.max() < upper_bound
    if lower_bound is not None:
        assert output.min() > lower_bound
Project: Word2Vec    Author: hashbangCoder    | project source | file source
def on_epoch_end(self, epoch, logs={}):
    current = logs.get(self.monitor)
    if current is None:
        warnings.warn('Early stopping requires %s available!' % self.monitor, RuntimeWarning)
        return  # no metric to compare against

    if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
    else:
        if (self.wait == self.patience - 1) and self.anneal:
            print('Halving Learning Rate...')
            K.set_value(self.model.optimizer.lr, K.get_value(self.model.optimizer.lr) / 2)
        elif self.wait >= self.patience:
            print('Epoch %d: early stopping' % epoch)
            self.model.stop_training = True
        self.wait += 1
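The K.set_value(lr, K.get_value(lr) / 2) line above is the canonical read-modify-write idiom for adjusting optimizer hyperparameters mid-training: pull the current value out as a NumPy scalar, transform it in Python, and push the result back into the backend variable. The same pair of calls recurs in most of the scheduler callbacks below.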
Project: kr_faster_rcnn    Author: romyny    | project source | file source
def call(self, x, mask=None):
        logits = x[0]
        labels = x[1]

        if TRAIN_DEBUG:
            print ("logits.shape: {}".format(logits.get_shape()))
            print ("labels.shape: {}".format(labels.get_shape()))


        cross_entropy_with_logits = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                                                   labels=labels)
        softmaxWithLoss = tf.reduce_mean(cross_entropy_with_logits)
        print ("softmaxWithLossLayer, {}: {}".format(self._name, Kb.get_value(softmaxWithLoss)))

        self.output1 = softmaxWithLoss.shape

        if TRAIN_DEBUG:
            #print ("softmaxWithLoss.shape: {}".format(softmaxWithLoss.shape))
            pass

        return softmaxWithLoss

    #"""
Project: deep-learning-essentials    Author: DominicBreuker    | project source | file source
def convert_weights_theano2tensorflow(model_builder,
                                      theano_weights_file,
                                      tensorflow_weights_file):
    """
    Theano and Tensorflow implement convolutional layers differently.
    This functions transforms pretrained weights for a Theano-based CNN
    to Tensorflow format.
    check out https://github.com/fchollet/keras/wiki/Converting-convolution-kernels-from-Theano-to-TensorFlow-and-vice-versa
    """
    assert K._BACKEND == 'tensorflow'
    model = model_builder(theano_weights_file)
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D',
                                        'Convolution2D',
                                        'Convolution3D',
                                        'AtrousConvolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)

    K.get_session().run(ops)
    model.save_weights(tensorflow_weights_file)
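A quick usage sketch for the converter above; the builder function and file names are placeholders rather than part of the original project, and model_builder is expected to construct the architecture and load the Theano-format weights before the kernels are flipped:

# Hypothetical invocation; requires the TensorFlow backend, as the assert enforces.
convert_weights_theano2tensorflow(model_builder=build_my_cnn,
                                  theano_weights_file='weights_theano.h5',
                                  tensorflow_weights_file='weights_tensorflow.h5')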
Project: mcv-m5    Author: david-vazquez    | project source | file source
def on_epoch_begin(self, epoch, logs=None):
        current_lr = float(K.get_value(self.model.optimizer.lr))
        try:
            new_lr = current_lr / self.decay_rate
            if (self.decay_epochs is None) or ((epoch+1) in self.decay_epochs):
                # Decay current learning rate and assign it to the model
                K.set_value(self.model.optimizer.lr, new_lr)
                print('    \nLearning rate decayed by a factor of {}: {:.2E} --> {:.2E}\n'.format(
                    self.decay_rate,
                    current_lr,
                    new_lr
                )
                )
        except TypeError:
            raise ValueError('Decay rate for LRDecayScheduler must be a number.\n'
                             'Decay epochs for LRDecayScheduler must be a list of numbers.')
Project: keras-neural-tensor-layer    Author: dapurv5    | project source | file source
def main():
  input1 = Input(shape=(64,), dtype='float32')
  input2 = Input(shape=(64,), dtype='float32')
  btp = NeuralTensorLayer(output_dim=32, input_dim=64)([input1, input2])

  p = Dense(output_dim=1)(btp)
  model = Model(input=[input1, input2], output=[p])

  sgd = SGD(lr=0.0000000001, decay=1e-6, momentum=0.9, nesterov=True)
  model.compile(loss='mean_squared_error', optimizer=sgd)
  X_train, Y_train, X_test, Y_test = get_data()
  X_train = X_train.astype(np.float32)
  Y_train = Y_train.astype(np.float32)
  X_test = X_test.astype(np.float32)
  Y_test = Y_test.astype(np.float32)

  model.fit([X_train, X_train], Y_train, nb_epoch=50, batch_size=5)
  score = model.evaluate([X_test, X_test], Y_test, batch_size=1)
  print(score)

  print(K.get_value(model.layers[2].W))
Project: RocAlphaGo    Author: Rochester-NRT    | project source | file source
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        ### THE UPDATED CALCULATION ###
        lr = self.lr * (1.0 / (1.0 + self.decay))
        self.updates = [(self.iterations, self.iterations + 1.)]
        for p, g, c in zip(params, grads, constraints):
            m = K.variable(np.zeros(K.get_value(p).shape))  # momentum
            v = self.momentum * m - lr * g  # velocity
            self.updates.append((m, v))

            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v

            self.updates.append((p, c(new_p)))  # apply constraints
        return self.updates
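Note that K.get_value(p) is used here only for its .shape: materializing the parameter as a NumPy array is a backend-agnostic way to allocate a momentum variable of the same shape. Later Keras versions expose K.int_shape(p) for this, which avoids pulling the whole array out of the session.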
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        self.input_dim = input_shape[2]

        self.W = self.init((self.output_dim, 4 * self.input_dim),
                           name='{}_W'.format(self.name))
        self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
                                 name='{}_U'.format(self.name))
        self.b = K.variable(np.hstack((np.zeros(self.input_dim),
                                       K.get_value(self.forget_bias_init((self.input_dim,))),
                                       np.zeros(self.input_dim),
                                       np.zeros(self.input_dim))),
                            name='{}_b'.format(self.name))

        self.A = self.init((self.input_dim, self.output_dim),
                            name='{}_A'.format(self.name))
        self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))


        self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def on_epoch_end(self, epoch, logs={}):

        if epoch in self.predefined_epochs or -1 in self.predefined_epochs:
            lr = K.get_value(self.model.optimizer.lr) / self.decay_rate
            K.set_value(self.model.optimizer.lr, lr)
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def on_train_begin(self, logs={}):
        self.lr_init = K.get_value(self.model.optimizer.lr)
Project: keras-image-captioning    Author: danieljl    | project source | file source
def on_epoch_end(self, epoch, logs):
        logs['learning_rate'] = K.get_value(self.model.optimizer.lr)
Project: latplan    Author: guicho271828    | project source | file source
def _build(self,input_shape):
        data_dim = np.prod(input_shape) 
        self.gs = self.build_gs()
        self.gs2 = self.build_gs(N=data_dim)
        self.gs3 = self.build_gs(N=data_dim)

        _encoder = self.build_encoder(input_shape)
        _decoder = self.build_decoder(input_shape)

        x = Input(shape=input_shape)
        z = Sequential([flatten, *_encoder, self.gs])(x)
        y = Sequential([flatten,
                        *_decoder,
                        self.gs2,
                        Lambda(take_true),
                        Reshape(input_shape)])(z)

        z2 = Input(shape=(self.parameters['N'], self.parameters['M']))
        y2 = Sequential([flatten,
                        *_decoder,
                        self.gs3,
                        Lambda(take_true),
                        Reshape(input_shape)])(z2)

        def rec(x, y):
            return bce(K.reshape(x,(K.shape(x)[0],data_dim,)),
                       K.reshape(y,(K.shape(x)[0],data_dim,)))
        def loss(x, y):
            return rec(x,y) + self.gs.loss() + self.gs2.loss()

        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs.cool))
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs2.cool))
        self.callbacks.append(LambdaCallback(on_epoch_end=self.gs3.cool))
        self.custom_log_functions['tau'] = lambda: K.get_value(self.gs.tau)
        self.loss = loss
        self.metrics.append(rec)
        self.encoder     = Model(x, z)
        self.decoder     = Model(z2, y2)
        self.net = Model(x, y)
        self.autoencoder = self.net
Project: keras    Author: GeekLiB    | project source | file source
def on_epoch_end(self, epoch, logs={}):
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
                          self.monitor, RuntimeWarning)
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr + self.lr_epsilon:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1
Project: keras    Author: GeekLiB    | project source | file source
def _runner(init, shape, target_mean=None, target_std=None,
            target_max=None, target_min=None):
    variable = init(shape)
    output = K.get_value(variable)
    lim = 1e-2
    if target_std is not None:
        assert abs(output.std() - target_std) < lim
    if target_mean is not None:
        assert abs(output.mean() - target_mean) < lim
    if target_max is not None:
        assert abs(output.max() - target_max) < lim
    if target_min is not None:
        assert abs(output.min() - target_min) < lim
Project: keras    Author: GeekLiB    | project source | file source
def test_ReduceLROnPlateau():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(nb_class, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()

    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_save_and_load_all_weights():
    '''
    Test save_all_weights and load_all_weights. Save and load optimizer and model weights but not configuration.
    '''

    def make_model():
        _x = Input((10,))
        _y = Dense(10)(_x)
        _m = Model(_x, _y)
        _m.compile('adam', 'mean_squared_error')
        _m._make_train_function()
        return _m

    # make a model
    m1 = make_model()
    # set weights
    w1 = m1.layers[1].kernel  # dense layer
    w1value = K.get_value(w1)
    w1value[0, 0:4] = [1, 3, 3, 7]
    K.set_value(w1, w1value)
    # set optimizer weights
    ow1 = m1.optimizer.weights[3]  # momentum weights
    ow1value = K.get_value(ow1)
    ow1value[0, 0:3] = [4, 2, 0]
    K.set_value(ow1, ow1value)
    # save all weights
    save_all_weights(m1, 'model.h5')
    # new model
    m2 = make_model()
    # load all weights
    load_all_weights(m2, 'model.h5')
    # check weights
    assert_allclose(K.get_value(m2.layers[1].kernel)[0, 0:4], [1, 3, 3, 7])
    # check optimizer weights
    assert_allclose(K.get_value(m2.optimizer.weights[3])[0, 0:3], [4, 2, 0])
    os.remove('model.h5')
Project: deer    Author: VinF    | project source | file source
def getAllParams(self):
        params_value=[]
        for i,p in enumerate(self.params):
            params_value.append(K.get_value(p))
        for i,p in enumerate(self.params_policy):
            params_value.append(K.get_value(p))

        return params_value
Project: deer    Author: VinF    | project source | file source
def _resetQHat(self):
        for i,(param,next_param) in enumerate(zip(self.params, self.next_params)):
            K.set_value(next_param,K.get_value(param))
Project: deer    Author: VinF    | project source | file source
def getAllParams(self):
        params_value=[]
        for i,p in enumerate(self.params):
            params_value.append(K.get_value(p))
        return params_value
Project: deer    Author: VinF    | project source | file source
def _resetQHat(self):
        for i,(param,next_param) in enumerate(zip(self.params, self.next_params)):
            K.set_value(next_param,K.get_value(param))

        self._compile() # recompile to take into account new optimizer parameters that may have changed since
                        # self._compile() was called in __init__. FIXME: this call should ideally be done elsewhere
Project: variants_of_rmsprop_and_adagrad    Author: mmahesh    | project source | file source
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'xi_1': float(K.get_value(self.xi_1)),
                  'xi_2': float(K.get_value(self.xi_2)),
                  'decay': float(K.get_value(self.decay))}
        base_config = super(SC_Adagrad, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
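The float(K.get_value(...)) wrapping in these get_config() methods is deliberate: a config must contain plain Python scalars so it can be serialized, while the hyperparameters themselves live in backend variables so that callbacks can still change them with K.set_value(). A small sketch of the round trip, assuming only the standard library and the Keras backend:

import json
from keras import backend as K

lr = K.variable(0.01, name='lr')          # mutable during training
config = {'lr': float(K.get_value(lr))}   # plain float, safe to serialize
print(json.dumps(config))                 # works; a raw backend variable would not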
Project: variants_of_rmsprop_and_adagrad    Author: mmahesh    | project source | file source
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'xi_1': float(K.get_value(self.xi_1)),
                  'xi_2': float(K.get_value(self.xi_2)),
                  'gamma': float(K.get_value(self.gamma)),
                  'decay': float(K.get_value(self.decay))}
        base_config = super(SC_RMSProp, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Project: variants_of_rmsprop_and_adagrad    Author: mmahesh    | project source | file source
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'delta': float(K.get_value(self.delta)),
                  'gamma': float(K.get_value(self.gamma)),
                  'decay': float(K.get_value(self.decay))}
        base_config = super(RMSProp_variant, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Project: pyannote-audio    Author: pyannote    | project source | file source
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(SMORMS3, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


# register user-defined Keras optimizer
Project: aetros-cli    Author: aetros    | project source | file source
def get_learning_rate(self):

        if hasattr(self.model, 'optimizer'):
            config = self.model.optimizer.get_config()

            from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

            if isinstance(self.model.optimizer, Adadelta) or isinstance(self.model.optimizer, Adam) \
                    or isinstance(self.model.optimizer, Adamax) or isinstance(self.model.optimizer, Adagrad)\
                    or isinstance(self.model.optimizer, RMSprop) or isinstance(self.model.optimizer, SGD):
                return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))

            elif 'lr' in config:
                return config['lr']
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
                          self.monitor, RuntimeWarning)
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr + self.lr_epsilon:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1
Project: kfs    Author: the-moliver    | project source | file source
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'beta_3': float(K.get_value(self.beta_3)),
                  'small_k': float(K.get_value(self.small_k)),
                  'big_K': float(K.get_value(self.big_K)),
                  'epsilon': self.epsilon}
        base_config = super(Eve, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Project: kfs    Author: the-moliver    | project source | file source
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'epsilon': self.epsilon,
                  'schedule_decay': self.schedule_decay,
                  'accum_iters': float(K.get_value(self.accum_iters))}
        base_config = super(NadamAccum, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Project: EUNN-theano    Author: iguanaus    | project source | file source
def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'lr_natGrad': float(K.get_value(self.lr_natGrad)),
                  'rho': float(K.get_value(self.rho)),
                  'epsilon': self.epsilon}
        base_config = super(RMSprop_and_natGrad, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Project: keras-rl    Author: matthiasplappert    | project source | file source
def test_clone_optimizer():
    lr, momentum, clipnorm, clipvalue = np.random.random(size=4)
    optimizer = SGD(lr=lr, momentum=momentum, clipnorm=clipnorm, clipvalue=clipvalue)
    clone = clone_optimizer(optimizer)

    assert isinstance(clone, SGD)
    assert K.get_value(optimizer.lr) == K.get_value(clone.lr)
    assert K.get_value(optimizer.momentum) == K.get_value(clone.momentum)
    assert optimizer.clipnorm == clone.clipnorm
    assert optimizer.clipvalue == clone.clipvalue
Project: keras_text_classifier    Author: cdj0311    | project source | file source
def th2tf( model):
    import tensorflow as tf
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)
    K.get_session().run(ops)
    return model
Project: keras_text_classifier    Author: cdj0311    | project source | file source
def tf2th(model):
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            K.set_value(layer.W, converted_w)
    return model
Project: kaggle-allstate-claims-severity    Author: alno    | project source | file source
def on_train_begin(self, logs={}):
        self.sym_trainable_weights = collect_trainable_weights(self.model)
        # Initialize moving averaged weights using original model values
        self.mv_trainable_weights_vals = {x.name: K.get_value(x) for x in
                                          self.sym_trainable_weights}
        if self.verbose:
            print('Created a copy of model weights to initialize moving'
                  ' averaged weights.')
Project: kaggle-allstate-claims-severity    Author: alno    | project source | file source
def on_batch_end(self, batch, logs={}):
        for weight in self.sym_trainable_weights:
            old_val = self.mv_trainable_weights_vals[weight.name]
            self.mv_trainable_weights_vals[weight.name] -= \
                (1.0 - self.decay) * (old_val - K.get_value(weight))
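The in-place update above is an exponential moving average written differently: ema -= (1 - decay) * (ema - w) is algebraically ema = decay * ema + (1 - decay) * w, so each batch nudges the stored copy of every weight toward its current value, with decay setting the averaging horizon.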
Project: Kaggle_Buddy    Author: NickYi1990    | project source | file source
def _scheduler(self, epoch):
        if epoch%self.decay_after_n_epoch==0 and epoch!=0:
            lr = K.get_value(self.model.optimizer.lr)
            K.set_value(self.model.optimizer.lr, lr*self.decay_rate)
            print("lr changed to {}".format(lr*self.decay_rate))
        return K.get_value(self.model.optimizer.lr)
Project: c2w2c    Author: milankinen    | project source | file source
def save_states(self):
    states = []
    for lstm in self._lstms:
      states.append([np.copy(K.get_value(s)) for s in lstm.states])
    self._saved_states = states
Project: Keras-GAN    Author: Shaofanl    | project source | file source
def InitNormal(loc=0.0, scale=0.002):
    def initf(model):
        for w in model.weights:
            if w.name.startswith('conv2d') or w.name.startswith('dense'):
                if 'kernel' in w.name:
                    print('init weight', w.name)
                    value = np.random.normal(loc=loc, scale=scale, size=K.get_value(w).shape)
                    K.set_value(w, value.astype('float32'))
    return initf
Project: Keras-GAN    Author: Shaofanl    | project source | file source
def InitNormal(loc=0.0, scale=0.002):
    def initf(model):
        for w in model.weights:
            if w.name.startswith('conv2d') or w.name.startswith('dense'):
                if w.name.endswith('kernel'):
                    value = np.random.normal(loc=loc, scale=scale, size=K.get_value(w).shape)
                    K.set_value(w, value.astype('float32'))
#               if w.name.endswith('bias'):
#                   value = np.zeros(K.get_value(w).shape)
#                   K.set_value(w, value.astype('float32'))
    return initf
Project: kchar    Author: jarfo    | project source | file source
def fit_generator(self, generator, steps_per_epoch, epochs, validation_data, validation_steps, opt):
        val_losses = []
        lr = K.get_value(self.optimizer.lr)
        for epoch in range(epochs):
            super(sModel, self).fit_generator(generator, steps_per_epoch, epochs=epoch+1, verbose=1, initial_epoch=epoch)
            val_loss = exp(self.evaluate_generator(validation_data, validation_steps))
            val_losses.append(val_loss)
            print('Epoch {}/{}. Validation loss: {}'.format(epoch + 1, epochs, val_loss))
            if len(val_losses) > 2 and (val_losses[-2] - val_losses[-1]) < opt.decay_when:
                lr *= opt.learning_rate_decay
                K.set_value(self.optimizer.lr, lr)
            if epoch == epochs-1 or epoch % opt.save_every == 0:
                savefile = '%s/lm_%s_epoch%d_%.2f.h5' % (opt.checkpoint_dir, opt.savefile, epoch + 1, val_loss)
                self.save_weights(savefile)