Python keras.backend module: set_value() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.backend.set_value().
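Before the project examples, here is a minimal self-contained sketch of the call itself (assuming a Keras 1.x/2.x installation with the TensorFlow or Theano backend; the variable v is illustrative):

import numpy as np
from keras import backend as K

# Create a backend variable initialized to zeros.
v = K.variable(np.zeros((2, 3)))

# Overwrite its value in place; no graph rebuilding or recompilation.
K.set_value(v, np.ones((2, 3)))

# Read it back as a NumPy array.
print(K.get_value(v))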

Project: yoctol-keras-layer-zoo    Author: Yoctol    | project source | file source
def reset_states(self, states=None):
        if states is None:
            self.recurrent_layer.reset_states(states)
        else:
            self.recurrent_layer.reset_states(states[:-1])

        batch_size = self.recurrent_layer.input_spec[0].shape[0]
        if self.dense_state is None:
            self.dense_state = K.zeros((
                batch_size,
                self.dense_layer.units
            ))
        elif states is None:
            K.set_value(
                self.dense_state,
                np.zeros((batch_size, self.dense_layer.units))
            )
        else:
            K.set_value(
                self.dense_state,
                states[-1]
            )
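The snippet above follows a common pattern: allocate the state variable once with K.zeros, then refresh it in place with K.set_value on later resets. A stripped-down sketch of just that pattern (class and names are illustrative, not from the project):

import numpy as np
from keras import backend as K

class StateHolder(object):
    def __init__(self):
        self.state = None

    def reset(self, batch_size, units, value=None):
        if self.state is None:
            # First reset: allocate the backend variable.
            self.state = K.zeros((batch_size, units))
        elif value is None:
            # Later resets: zero the existing variable in place.
            K.set_value(self.state, np.zeros((batch_size, units)))
        else:
            # Or install an externally supplied state.
            K.set_value(self.state, value)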
Project: importance-sampling    Author: idiap    | project source | file source
def set_lr(self, lr):
        """Set the learning rate of the wrapped models.

        We try to set the learning rate on the member variables `model` and
        `small`. If no `model` member variable is found, we raise
        NotImplementedError.
        """
        try:
            K.set_value(
                self.model.optimizer.lr,
                lr
            )
        except AttributeError:
            raise NotImplementedError()

        try:
            K.set_value(
                self.small.optimizer.lr,
                lr
            )
        except AttributeError:
            pass
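A hypothetical end-to-end use of this wrapper pattern (the Wrapper class below is a stand-in written for this page, not the project's class):

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

class Wrapper(object):
    def __init__(self, model):
        self.model = model

    def set_lr(self, lr):
        try:
            K.set_value(self.model.optimizer.lr, lr)
        except AttributeError:
            raise NotImplementedError()

model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='sgd', loss='mse')

w = Wrapper(model)
w.set_lr(1e-4)
print(K.get_value(model.optimizer.lr))  # ~1e-4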
Project: Word2Vec    Author: hashbangCoder    | project source | file source
def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Early stopping requires %s available!' % self.monitor, RuntimeWarning)
            return  # nothing to compare against this epoch

        if self.monitor_op(current, self.best):
            self.best = current
            self.wait = 0
        else:
            if (self.wait == self.patience - 1) and self.anneal:
                print('Halving Learning Rate...')
                K.set_value(self.model.optimizer.lr, K.get_value(self.model.optimizer.lr) / 2)
            elif self.wait >= self.patience:
                print('Epoch %d: early stopping' % epoch)
                self.model.stop_training = True
            self.wait += 1
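The core annealing step used above — read the current rate, halve it, write it back — can be exercised on its own (a throwaway model, for illustration only):

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, input_dim=2)])
model.compile(optimizer='sgd', loss='mse')

old = K.get_value(model.optimizer.lr)
K.set_value(model.optimizer.lr, old / 2)
print(old, '->', K.get_value(model.optimizer.lr))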
Project: LSTM-GRU-CNN-MLP    Author: ansleliu    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')

        if self.go_backwards:
            initial_time = self.input_spec[0].shape[1]
        else:
            initial_time = 0.

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1], initial_time)
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)), K.variable(initial_time)]
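Note that K.set_value also works on scalar variables, which is how the initial_time state above is reset; in isolation (illustrative):

from keras import backend as K

t = K.variable(0.)      # scalar state, e.g. an initial time step
K.set_value(t, 42)      # plain Python scalars are accepted
print(K.get_value(t))   # -> 42.0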
Project: keras-prednet    Author: kunimasa-kawasaki    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided ' +
                            '(including batch size).')

        if self.return_sequences:
            out_row, out_col, out_filter = self.output_shape[2:]
        else:
            out_row, out_col, out_filter = self.output_shape[1:]

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0],
                                  out_row, out_col, out_filter)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0],
                                  out_row, out_col, out_filter)))
        else:
            self.states = [K.zeros((input_shape[0],
                                    out_row, out_col, out_filter)),
                           K.zeros((input_shape[0],
                                    out_row, out_col, out_filter))]
Project: mcv-m5    Author: david-vazquez    | project source | file source
def on_epoch_begin(self, epoch, logs=None):
        current_lr = float(K.get_value(self.model.optimizer.lr))
        try:
            new_lr = current_lr / self.decay_rate
            if (self.decay_epochs is None) or ((epoch+1) in self.decay_epochs):
                # Decay current learning rate and assign it to the model
                K.set_value(self.model.optimizer.lr, new_lr)
                print('    \nLearning rate decayed by a factor of {}: {:.2E} --> {:.2E}\n'.format(
                    self.decay_rate,
                    current_lr,
                    new_lr
                )
                )
        except TypeError:
            raise ValueError('Decay rate for LRDecayScheduler must be a number.\n'
                             'Decay epochs for LRDecayScheduler must be a list of numbers.')
Project: New_Layers-Keras-Tensorflow    Author: WeidiXie    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, it needs to know '
                            'its batch size. Specify the batch size '
                            'of your input tensors: \n'
                            '- If using a Sequential model, '
                            'specify the batch size by passing '
                            'a `batch_input_shape` '
                            'argument to your first layer.\n'
                            '- If using the functional API, specify '
                            'the time dimension by passing a '
                            '`batch_shape` argument to your Input layer.')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]
Project: ikelos    Author: braingineer    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')

        if self.go_backwards:
            initial_time = self.input_spec[0].shape[1]
        else:
            initial_time = 0.

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1], initial_time)
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)), K.variable(initial_time)]
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape

        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')

        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.hidden_recurrent_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.input_dim)))
            K.set_value(self.states[2],
                        np.zeros((input_shape[0], self.hidden_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
                            K.zeros((input_shape[0], self.input_dim)),
                            K.zeros((input_shape[0], self.hidden_dim))]
Project: keras_bn_library    Author: bnsnapper    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, it needs to know '
                             'its batch size. Specify the batch size '
                             'of your input tensors: \n'
                             '- If using a Sequential model, '
                             'specify the batch size by passing '
                             'a `batch_input_shape` '
                             'argument to your first layer.\n'
                             '- If using the functional API, specify '
                             'the time dimension by passing a '
                             '`batch_shape` argument to your Input layer.')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.input_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.input_dim)),
                            K.zeros((input_shape[0], self.output_dim))]
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def on_epoch_end(self, epoch, logs={}):

        if epoch in self.predefined_epochs or -1 in self.predefined_epochs:
            lr = K.get_value(self.model.optimizer.lr) / self.decay_rate
            K.set_value(self.model.optimizer.lr, lr)
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def on_epoch_end(self, epoch, logs={}):
        if epoch > self.epoch_n:
            ratio = 1.0 * (self.num_epoch - epoch)  # epoch_n + 1 because learning rate is set for next epoch
            ratio = max(0, ratio / (self.num_epoch - self.epoch_n))
            lr = np.float32(self.lr_init * ratio)
            K.set_value(self.model.optimizer.lr,lr)
Project: segmentation_DLMI    Author: imatge-upc    | project source | file source
def on_epoch_end(self, epoch, logs={}):
        if epoch > self.epoch_n:
            ratio = 1.0 - self.decay*(epoch - self.epoch_n )
            ratio = max(0, ratio)
            lr = np.float32(self.lr_init * ratio)
            K.set_value(self.model.optimizer.lr, lr)
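To make the linear decay above concrete, here is the same arithmetic tabulated for illustrative settings (lr_init=0.1, decay=0.1, epoch_n=5):

import numpy as np

lr_init, decay, epoch_n = 0.1, 0.1, 5
for epoch in (6, 10, 15):
    ratio = max(0, 1.0 - decay * (epoch - epoch_n))
    print(epoch, np.float32(lr_init * ratio))
# epoch 6 -> 0.09, epoch 10 -> 0.05, epoch 15 -> 0.0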
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, a complete '
                             'input_shape must be provided '
                             '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]
Project: recurrent-attention-for-QA-SQUAD-based-on-keras    Author: wentaozhu    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise ValueError('If a RNN is stateful, a complete '
                             'input_shape must be provided '
                             '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.units)))
        else:
            self.states = [K.zeros((input_shape[0], self.units))]
Project: latplan    Author: guicho271828    | project source | file source
def cool(self, epoch, logs):
        K.set_value(
            self.tau,
            np.max([self.min,
                    self.max * np.exp(- self.anneal_rate * epoch)]))
Project: latplan    Author: guicho271828    | project source | file source
def _load(self):
        super()._load()
        K.set_value(self.c, self.parameters['c'])
Project: latplan    Author: guicho271828    | project source | file source
def train(self,train_data,
              batch_size=1000,
              save=True,
              train_data_to=None,
              test_data=None,
              test_data_to=None,
              **kwargs):
        super().train(train_data,
                      batch_size=batch_size,
                      train_data_to=train_data_to,
                      test_data=test_data,
                      test_data_to=test_data_to,
                      save=False,
                      **kwargs)

        s = self.net.predict(test_data[test_data_to == 1],batch_size=batch_size)
        if np.count_nonzero(test_data_to == 1) > 0:
            c = s.mean()
            print("PU constant c =", c)
            K.set_value(self.c, c)
            self.parameters['c'] = float(c)
            # prevent saving before setting c
            if save:
                self.save()
        else:
            raise Exception("There are no positive data in the validation set; training failed.")
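For context: the constant c computed here is the classic positive-unlabeled estimate — the classifier's mean score over known-positive validation examples, as in Elkan & Noto. The estimation step alone, with stand-in scores:

import numpy as np

# Stand-in for self.net.predict(test_data[test_data_to == 1], ...)
scores_on_positives = np.array([0.8, 0.9, 0.7])
c = scores_on_positives.mean()
print("PU constant c =", c)  # -> 0.8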
Project: Kutils    Author: ishank26    | project source | file source
def on_epoch_begin(self, epoch, logs={}):
        old_lr = K.get_value(self.model.optimizer.lr)
        if epoch > 1 and epoch % self.n_epoch == 0:
            # Decay the rate every n_epoch epochs; otherwise leave it unchanged.
            new_lr = self.decay * old_lr
            K.set_value(self.model.optimizer.lr, new_lr)


# keras integrated
Project: Kutils    Author: ishank26    | project source | file source
def on_epoch_end(self, epoch, logs={}):
        loss = logs.get('loss')  # current training loss
        print("loss: ", loss)
        old_lr = K.get_value(self.model.optimizer.lr)  # get old lr
        new_lr = old_lr * np.exp(loss)  # lr * exp(loss)
        K.set_value(self.model.optimizer.lr, new_lr)


# decaylr=LearningRateScheduler(decay_sch)


# checkpoint=ModelCheckpoint("weights/adam_noep{0}_batch{1}_seq_{2}.hdf5".format(\
# no_epochs,batch, seq_length), monitor='loss', verbose=0,
# save_best_only=True, save_weights_only=False, mode='min')
Project: keras    Author: GeekLiB    | project source | file source
def on_epoch_begin(self, epoch, logs={}):
        assert hasattr(self.model.optimizer, 'lr'), \
            'Optimizer must have a "lr" attribute.'
        lr = self.schedule(epoch)

        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')

        K.set_value(self.model.optimizer.lr, lr)
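This snippet is keras's own LearningRateScheduler; the packaged callback can be used directly (illustrative step-decay schedule):

from keras.callbacks import LearningRateScheduler

scheduler = LearningRateScheduler(lambda epoch: 1e-3 * (0.5 ** (epoch // 10)))
# model.fit(x, y, epochs=30, callbacks=[scheduler])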
Project: keras    Author: GeekLiB    | project source | file source
def on_epoch_end(self, epoch, logs={}):
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
                          self.monitor, RuntimeWarning)
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr + self.lr_epsilon:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1
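This snippet is essentially keras's ReduceLROnPlateau callback; the packaged version can be used directly (illustrative settings):

from keras.callbacks import ReduceLROnPlateau

reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=3, min_lr=1e-6)
# model.fit(x, y, validation_data=(x_val, y_val), callbacks=[reduce_lr])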
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def test_save_and_load_all_weights():
    '''
    Test save_all_weights and load_all_weights. Save and load optimizer and model weights but not configuration.
    '''

    def make_model():
        _x = Input((10,))
        _y = Dense(10)(_x)
        _m = Model(_x, _y)
        _m.compile('adam', 'mean_squared_error')
        _m._make_train_function()
        return _m

    # make a model
    m1 = make_model()
    # set weights
    w1 = m1.layers[1].kernel  # dense layer
    w1value = K.get_value(w1)
    w1value[0, 0:4] = [1, 3, 3, 7]
    K.set_value(w1, w1value)
    # set optimizer weights
    ow1 = m1.optimizer.weights[3]  # momentum weights
    ow1value = K.get_value(ow1)
    ow1value[0, 0:3] = [4, 2, 0]
    K.set_value(ow1, ow1value)
    # save all weights
    save_all_weights(m1, 'model.h5')
    # new model
    m2 = make_model()
    # load all weights
    load_all_weights(m2, 'model.h5')
    # check weights
    assert_allclose(K.get_value(m2.layers[1].kernel)[0, 0:4], [1, 3, 3, 7])
    # check optimizer weights
    assert_allclose(K.get_value(m2.optimizer.weights[3])[0, 0:3], [4, 2, 0])
    os.remove('model.h5')
Project: deer    Author: VinF    | project source | file source
def setAllParams(self, list_of_values):
        for i,p in enumerate(self.params):
            K.set_value(p,list_of_values[i])
        for j,p in enumerate(self.params_policy):
            K.set_value(p,list_of_values[j+i+1])
Project: deer    Author: VinF    | project source | file source
def _resetQHat(self):
        for i,(param,next_param) in enumerate(zip(self.params, self.next_params)):
            K.set_value(next_param,K.get_value(param))
Project: deer    Author: VinF    | project source | file source
def setAllParams(self, list_of_values):
        for i,p in enumerate(self.params):
            K.set_value(p,list_of_values[i])
Project: deer    Author: VinF    | project source | file source
def _resetQHat(self):
        for i,(param,next_param) in enumerate(zip(self.params, self.next_params)):
            K.set_value(next_param,K.get_value(param))

        self._compile() # recompile to take into account new optimizer parameters that may have changed since
                        # self._compile() was called in __init__. FIXME: this call should ideally be done elsewhere
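The _resetQHat pattern — syncing a target network by copying every parameter in place — reduces to the following sketch (the models here are throwaways written for this page, not deer's):

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

def sync_target(src, dst):
    # Copy each weight of src into dst in place, as _resetQHat does.
    for p, next_p in zip(src.trainable_weights, dst.trainable_weights):
        K.set_value(next_p, K.get_value(p))

q = Sequential([Dense(2, input_dim=3)])
q_hat = Sequential([Dense(2, input_dim=3)])
sync_target(q, q_hat)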
Project: NTM-Keras    Author: SigmaQuan    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)),
                           K.zeros((input_shape[0], self.output_dim))]
Project: gandlf    Author: codekansas    | project source | file source
def on_batch_end(self, epoch, logs={}):
        if not isinstance(self.model, GandlfModel):
            raise ValueError('The AdaptiveLearningRate callback only works '
                             'for Gandlf models.')

        if (not hasattr(self.model.gen_optimizer, 'lr') or
                not hasattr(self.model.dis_optimizer, 'lr')):
            raise ValueError('To use the Adaptive Learning Rate callback, '
                             'both the generator and discriminator optimizers '
                             'must have an "lr" attribute.')

        gen_loss, dis_loss = 0., 0.
        for key, val in logs.items():
            if key.endswith('gen_loss'):
                if val < 0:
                    raise ValueError('The adaptive learning rate callback '
                                     'doesn\'t work for negative losses.')
                gen_loss += val
            elif key.endswith('real_loss') or key.endswith('fake_loss'):
                if val < 0:
                    raise ValueError('The adaptive learning rate callback '
                                     'doesn\'t work for negative losses.')
                dis_loss += val

        dis_loss /= 2  # Double-counting real and fake data.
        total_loss = gen_loss + dis_loss + 1e-12
        gen_pct, dis_pct = gen_loss / total_loss, dis_loss / total_loss

        # Calculates the percentage to weight each one.
        generator_lr = self.generator_lr * gen_pct
        discriminator_lr = self.discriminator_lr * dis_pct

        # Updates the learning rates on both.
        K.set_value(self.model.gen_optimizer.lr, generator_lr)
        K.set_value(self.model.dis_optimizer.lr, discriminator_lr)
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = self.schedule(epoch)
        if not isinstance(lr, (float, np.float32, np.float64)):
            raise ValueError('The output of the "schedule" function '
                             'should be float.')
        K.set_value(self.model.optimizer.lr, lr)
Project: deep-learning-keras-projects    Author: jasmeetsb    | project source | file source
def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Learning Rate Plateau Reducing requires %s available!' %
                          self.monitor, RuntimeWarning)
        else:
            if self.in_cooldown():
                self.cooldown_counter -= 1
                self.wait = 0

            if self.monitor_op(current, self.best):
                self.best = current
                self.wait = 0
            elif not self.in_cooldown():
                if self.wait >= self.patience:
                    old_lr = float(K.get_value(self.model.optimizer.lr))
                    if old_lr > self.min_lr + self.lr_epsilon:
                        new_lr = old_lr * self.factor
                        new_lr = max(new_lr, self.min_lr)
                        K.set_value(self.model.optimizer.lr, new_lr)
                        if self.verbose > 0:
                            print('\nEpoch %05d: reducing learning rate to %s.' % (epoch, new_lr))
                        self.cooldown_counter = self.cooldown
                        self.wait = 0
                self.wait += 1
Project: keras-gp    Author: alshedivat    | project source | file source
def dlik_dh(self, value):
        K.set_value(self._dlik_dh, value)
Project: keras-gp    Author: alshedivat    | project source | file source
def batch_ids(self, value):
        K.set_value(self._batch_ids, value)
Project: keras-gp    Author: alshedivat    | project source | file source
def batch_sz(self, value):
        K.set_value(self._batch_sz, value)
Project: keras-gp    Author: alshedivat    | project source | file source
def nlml(self, value):
        K.set_value(self._nlml, value)
Project: keras-gp    Author: alshedivat    | project source | file source
def mse(self, value):
        K.set_value(self._mse, value)
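These one-liners are the bodies of property setters; the full pattern they imply looks roughly like this (a sketch, with nlml as the example attribute):

from keras import backend as K

class GPStats(object):
    def __init__(self):
        self._nlml = K.variable(0.)

    @property
    def nlml(self):
        return K.get_value(self._nlml)

    @nlml.setter
    def nlml(self, value):
        K.set_value(self._nlml, value)

stats = GPStats()
stats.nlml = 3.14
print(stats.nlml)  # -> ~3.14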
Project: triplets-extraction    Author: zsctju    | project source | file source
def reset_states(self):
        assert self.stateful or self.state_input or len(self.state_outputs) > 0, 'Layer must be stateful.'
        input_shape = self.input_shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided ' +
                            '(including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.hidden_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.hidden_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.hidden_dim)),
                           K.zeros((input_shape[0], self.hidden_dim))]
Project: rl    Author: Shmuma    | project source | file source
def check(self):
        if not os.path.exists(self.file_name):
            return

        self.logger.info("Tweak file detected: %s", self.file_name)
        with open(self.file_name, "rt", encoding='utf-8') as fd:
            for idx, l in enumerate(fd):
                name, val = list(map(str.strip, l.split('=', maxsplit=1)))  # split only on the first '='
                var = self.params.get(name)
                if not var:
                    self.logger.info("Unknown param '%s' found in file at line %d, ignored", name, idx+1)
                    continue
                self.logger.info("Param %s <-- %s", name, val)
                K.set_value(var, float(val))
        os.remove(self.file_name)
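The parsing-and-update step above, minus the file I/O, in a runnable stand-alone form (the params dict and the input line are made up):

from keras import backend as K

params = {'lr': K.variable(1e-3)}
line = 'lr = 0.0001'

name, val = [s.strip() for s in line.split('=', 1)]
if name in params:
    K.set_value(params[name], float(val))

print(K.get_value(params['lr']))  # -> 0.0001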
Project: EUNN-theano    Author: iguanaus    | project source | file source
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]
Project: Modified-LSTM    Author: jingweimo    | project source | file source
def on_epoch_end(self, epoch, logs={}):
        loss = logs.get('loss')
        #loss = logs.get('val_loss')
        print('loss: ', loss)
        old_lr = 0.0001  # from 1e-3 to 1e-4 !! very important
        new_lr = old_lr * np.exp(loss)
        print('New learning rate: ', new_lr)
        K.set_value(self.model.optimizer.lr, new_lr)
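Worth noting about this schedule: exp(loss) > 1 whenever loss > 0, so the multiplicative update raises the rate as the loss grows. Tabulated for a few illustrative loss values:

import numpy as np

old_lr = 0.0001
for loss in (0.1, 0.5, 1.0):
    print(loss, old_lr * np.exp(loss))
# 0.1 -> ~1.105e-4, 0.5 -> ~1.649e-4, 1.0 -> ~2.718e-4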