Python keras.backend module: learning_phase() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.backend.learning_phase().
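
Before diving into the examples, here is a minimal, self-contained sketch of the pattern that nearly all of them share: K.learning_phase() is appended to the inputs of a backend function (or fed through a TensorFlow feed_dict), and the caller passes 0 for test/inference mode or 1 for training mode so that phase-dependent layers such as Dropout and BatchNormalization behave accordingly. The toy model below is hypothetical and assumes a Keras 1.x/2.x install with TF1-style graph execution; it only illustrates the calling convention, not any particular project's code.

import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout

# Hypothetical toy model containing a phase-dependent layer (Dropout).
model = Sequential([
    Dense(16, activation='relu', input_shape=(8,)),
    Dropout(0.5),
    Dense(1, activation='sigmoid'),
])

# Build a backend function whose inputs include the learning-phase flag.
get_output = K.function([model.input, K.learning_phase()],
                        [model.layers[-1].output])

x = np.random.random((4, 8)).astype('float32')
test_out = get_output([x, 0])[0]   # 0 = test mode: dropout disabled, deterministic output
train_out = get_output([x, 1])[0]  # 1 = train mode: dropout active, stochastic output

The same flag can also be fed directly through a session's feed_dict (K.learning_phase(): 0 or 1), which is the variant several of the reinforcement-learning examples below use.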

Project: detection-2016-nipsws    Author: imatge-upc    | Project source | File source
def get_image_descriptor_for_image(image, model):
    im = cv2.resize(image, (224, 224)).astype(np.float32)
    dim_ordering = K.image_dim_ordering()
    if dim_ordering == 'th':
        # 'RGB'->'BGR'
        im = im[::-1, :, :]
        # Zero-center by mean pixel
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        im = im[:, :, ::-1]
        # Zero-center by mean pixel
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, [model.layers[33].output])
    return _convout1_f([0] + [im])
Project: CNNGestureRecognizer    Author: asingh33    | Project source | File source
def visualizeLayer(model, img, input_image, layerIndex):

    layer = model.layers[layerIndex]

    get_activations = K.function([model.layers[0].input, K.learning_phase()], [layer.output,])
    activations = get_activations([input_image, 0])[0]
    output_image = activations


    ## If 4-dimensional, the last dimension is the number of filters
    if output_image.ndim == 4:
        # Rearrange dimension so we can plot the result
        o1 = np.rollaxis(output_image, 3, 1)
        output_image = np.rollaxis(o1, 3, 1)

        print "Dumping filter data of layer{} - {}".format(layerIndex,layer.__class__.__name__)
        filters = len(output_image[0,0,0,:])

        fig=plt.figure(figsize=(8,8))
        # This loop will plot the 32 filter data for the input image
        for i in range(filters):
            ax = fig.add_subplot(6, 6, i+1)
            #ax.imshow(output_image[img,:,:,i],interpolation='none' ) #to see the first filter
            ax.imshow(output_image[0,:,:,i],'gray')
            #ax.set_title("Feature map of layer#{} \ncalled '{}' \nof type {} ".format(layerIndex,
            #                layer.name,layer.__class__.__name__))
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
        plt.tight_layout()
        #plt.show()
        fig.savefig("img_" + str(img) + "_layer" + str(layerIndex)+"_"+layer.__class__.__name__+".png")
        #plt.close(fig)
    else:
        print "Can't dump data of this layer{}- {}".format(layerIndex, layer.__class__.__name__)
Project: detection-2016-nipsws    Author: imatge-upc    | Project source | File source
def get_feature_map_4(model, im):
    im = im.astype(np.float32)
    dim_ordering = K.image_dim_ordering()
    if dim_ordering == 'th':
        # 'RGB'->'BGR'
        im = im[::-1, :, :]
        # Zero-center by mean pixel
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        im = im[:, :, ::-1]
        # Zero-center by mean pixel
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, [model.layers[23].output])
    feature_map = _convout1_f([0] + [im])
    feature_map = np.array([feature_map])
    feature_map = feature_map[0, 0, 0, :, :, :]
    return feature_map
Project: detection-2016-nipsws    Author: imatge-upc    | Project source | File source
def get_conv_image_descriptor_for_image(image, model):
    im = cv2.resize(image, (224, 224)).astype(np.float32)
    dim_ordering = K.image_dim_ordering()
    if dim_ordering == 'th':
        # 'RGB'->'BGR'
        im = im[::-1, :, :]
        # Zero-center by mean pixel
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        im = im[:, :, ::-1]
        # Zero-center by mean pixel
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, [model.layers[31].output])
    return _convout1_f([0] + [im])
Project: speechless    Author: JuliusKunze    | Project source | File source
def _input_dictionary_for_loss_net(self, labeled_spectrogram_batch: List[LabeledSpectrogram]) -> Dict[str, ndarray]:
        spectrograms = [x.z_normalized_transposed_spectrogram() for x in labeled_spectrogram_batch]
        labels = [x.label for x in labeled_spectrogram_batch]
        input_batch, prediction_lengths = self._input_batch_and_prediction_lengths(spectrograms)

        # Sets learning phase to training to enable dropout (see backend.learning_phase documentation for more info):
        training_phase_flag_tensor = array([True])
        label_lengths = reshape(array([len(label) for label in labels]), (len(labeled_spectrogram_batch), 1))
        return {
            Wav2Letter.InputNames.input_batch: input_batch,
            Wav2Letter.InputNames.prediction_lengths: self._prediction_length_batch(prediction_lengths,
                                                                                    batch_size=len(spectrograms)),
            Wav2Letter.InputNames.label_batch: self.grapheme_encoding.encode_label_batch(labels),
            Wav2Letter.InputNames.label_lengths: label_lengths,
            'keras_learning_phase': training_phase_flag_tensor
        }
Project: facejack    Author: PetarV-    | Project source | File source
def __init__(self, mdl, x):
        self.loss_value = None
        self.grad_values = None
        self.mdl = mdl

        loss = K.variable(0.)
        layer_dict = dict([(layer.name, layer) for layer in mdl.layers])

        inp = layer_dict['face'].output
        out = layer_dict['conf'].output

        loss -= K.sum(out)
        # Might want to add some L2-loss in here, depending on output
        # loss += 0.0005 * K.sum(K.square(inp - x))
        grads = K.gradients(loss, inp)

        outputs = [loss]
        if type(grads) in {list, tuple}:
            outputs += grads
        else:
            outputs.append(grads)

        self.f_outputs = K.function([inp, K.learning_phase()], outputs)
Project: dogsVScats    Author: prajwalkr    | Project source | File source
def visualize(model, layer_name):
    print('Model loaded.')
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    for filter_index in sample(range(0, layer_dict[layer_name].nb_filter),10):
        layer_output = layer_dict[layer_name].output
        loss = K.mean(layer_output[:, filter_index, :, :])
        grads = K.gradients(loss, model.layers[0].input)[0]
        grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
        iterate = K.function([model.layers[0].input, K.learning_phase()], [loss, grads])

        input_img_data = np.asarray([read_image('visimage.jpg')])

        for _ in range(100):
            loss_value, grads_value = iterate([input_img_data, 0])
            input_img_data += grads_value * 3

        img = deprocess_image(input_img_data[0])
        write_image(img, '../activations/out{}.jpg'.format(filter_index))
Project: auckland-ai-meetup-x-triage    Author: a-i-joe    | Project source | File source
def get_gradcam(image,model,layer_name,mode):
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image,0)
    loss = K.variable(0.)
    if mode == "abnormal":
        loss += K.sum(model.output)
    elif mode == "normal":
        loss += K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")
    #gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss,layer.output)[0]
    feature_weights = K.mean(upstream_grads,axis=[1,2]) #spatial global avg pool
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([image,0])[0]
Project: vinci    Author: Phylliade    | Project source | File source
def forward(self, observation):
        # Select an action.
        # [state] is the unprocessed version of a batch
        batch_state = [observation]
        # We get a batch of 1 action
        # action = self.actor.predict_on_batch(batch_state)[0]
        action = self.session.run(
            self.actor(self.variables["state"]),
            feed_dict={
                self.variables["state"]: batch_state,
                K.learning_phase(): 0
            })[0]
        assert action.shape == (self.nb_actions, )

        # Apply noise, if a random process is set.
        if self.exploration and self.random_process is not None:
            noise = self.random_process.sample()
            assert noise.shape == action.shape
            action += noise

        # Clip the action value, even if the noise is making it exceed its bounds
        action = np.clip(action, self.actions_low, self.actions_high)
        return action
Project: kaos    Author: RuiShu    | Project source | File source
def _define_io_loss_xy(self):
        u, p, q, s = {}, {}, {}, {}
        x, y = Input(shape=(784,)), Input(shape=(10,))
        q['z'], s['z'], p['x'] = self.xy_graph(x, y)
        u['x'] = self.u_net['x'](x)
        q['y'] = self.q_net['y'](u['x'])

        def alpha_loss(y, y_param):
            return K.categorical_crossentropy(q['y'], y)

        def xy_loss(x, x_param):
            return self.labeled_loss(x, q['z'], s['z'], p['x'])

        self._predict = K.function([x, K.learning_phase()], q['y'])
        return self._standardize_io_loss([x, y],
                                         [q['y'], p['x']],
                                         [alpha_loss, xy_loss])
Project: keras-attention-mechanism    Author: philipperemy    | Project source | File source
def get_activations(model, inputs, print_shape_only=False, layer_name=None):
    # Documentation is available online on Github at the address below.
    # From: https://github.com/philipperemy/keras-visualize-activations
    print('----- activations -----')
    activations = []
    inp = model.input
    if layer_name is None:
        outputs = [layer.output for layer in model.layers]
    else:
        outputs = [layer.output for layer in model.layers if layer.name == layer_name]  # all layer outputs
    funcs = [K.function([inp] + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
    layer_outputs = [func([inputs, 1.])[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
Project: sample-cnn    Author: tae-jun    | Project source | File source
def _make_tfrecord_train_function(self):
    if not hasattr(self, 'train_function'):
      raise RuntimeError('You must compile your model before using it.')
    if self.train_function is None:
      inputs = []
      if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        inputs += [K.learning_phase()]

      training_updates = self.optimizer.get_updates(
        self._collected_trainable_weights,
        self.constraints,
        self.total_loss)
      updates = self.updates + training_updates
      # Gets loss and metrics. Updates weights at each call.
      self.train_function = K.function(inputs,
                                       [self.total_loss] + self.metrics_tensors,
                                       updates=updates)
Project: Deconvnet-keras    Author: Jallet    | Project source | File source
def __init__(self, layer, linear = False):
        '''
        # Arguments
            layer: an instance of Activation layer, whose configuration 
                   will be used to initiate DActivation(input_shape, 
                   output_shape, weights)
        '''
        self.layer = layer
        self.linear = linear
        self.activation = layer.activation
        input = K.placeholder(shape = layer.output_shape)

        output = self.activation(input)
        # According to the original paper, 
        # In forward pass and backward pass, do the same activation(relu)
        self.up_func = K.function(
                [input, K.learning_phase()], output)
        self.down_func = K.function(
                [input, K.learning_phase()], output)

    # Compute activation in forward pass
Project: Sub-word-LSTM    Author: DrImpossible    | Project source | File source
def get_activations(model, layer, X_batch):
    """
    Purpose -> Obtains outputs from any layer in Keras
    Input   -> Trained model, layer from which output needs to be extracted & files to be given as input
    Output  -> Features from that layer 
    """
    #Referred from:- TODO: Enter the forum link from where I got this
    get_activations = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer].output,])
    activations = get_activations([X_batch,0])
    return activations
#h5f = h5py.File(Masterdir+Datadir+'Xtest_'+experiment_details+'.h5','r')
#X_test = h5f['dataset'][:]
#h5f.close()
#print(X_test.shape)
#
#inp = open(Masterdir+Datadir+'Ytest_'+experiment_details+'.pkl', 'rb')
#y_test=pickle.load(inp)
#inp.close()
#y_test=np.asarray(y_test).flatten()
#y_test2 = np_utils.to_categorical(y_test, numclasses) 
#print(y_test.shape)
Project: detecting-adversarial-samples    Author: rfeinman    | Project source | File source
def get_deep_representations(model, X, batch_size=256):
    """
    TODO
    :param model:
    :param X:
    :param batch_size:
    :return:
    """
    # last hidden layer is always at index -4
    output_dim = model.layers[-4].output.shape[-1].value
    get_encoding = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-4].output]
    )

    n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
    output = np.zeros(shape=(len(X), output_dim))
    for i in range(n_batches):
        output[i * batch_size:(i + 1) * batch_size] = \
            get_encoding([X[i * batch_size:(i + 1) * batch_size], 0])[0]

    return output
Project: DeepIV    Author: jhartford    | Project source | File source
def dropout_predict(self, x, z, n_samples=100):
        if isinstance(x, list):
            inputs = [z] + x
        else:
            inputs = [z, x]
        if not hasattr(self, "_dropout_predict"):

            predict_with_dropout = K.function(self.inputs + [K.learning_phase()],
                                              [self.layers[-1].output])

            def pred(inputs, n_samples = 100):
                # draw samples from the treatment network with dropout turned on
                samples = self.treatment.sample(inputs, n_samples, use_dropout=True)
                # prepare inputs for the response network
                rep_inputs = [i.repeat(n_samples, axis=0) for i in inputs[1:]] + [samples]
                # return outputs from the response network with dropout turned on (learning_phase=1)
                return predict_with_dropout(rep_inputs + [1])[0]
            self._dropout_predict = pred
            return self._dropout_predict(inputs, n_samples)
        else:
            return self._dropout_predict(inputs, n_samples)
Project: unblackboxing_webinar    Author: deepsense-ai    | Project source | File source
def _get_output_functions(self):
        # if you name your layers you can use model.get_layer('recurrent_layer')
        model = self.tweet_classifier
        recurrent_layer = model.layers[2]
        attention_layer = model.layers[5]
        merged_layer = model.layers[9]
        output_layer = model.layers[10]
        layers = [recurrent_layer, attention_layer, merged_layer, output_layer]   

        outputs = []        
        for l in layers:
            outputs.append(l.output)

            loss = K.mean(model.output)
            grads = K.gradients(loss, l.output)[0]
            grads_norm = grads / (K.sqrt(K.mean(K.square(grads))) + 1e-5)
            outputs.append(grads_norm)

        all_function = K.function([model.layers[0].input, K.learning_phase()],
                                  outputs)
        return all_function
Project: deeputil    Author: Avkash    | Project source | File source
def get_model_activation_obj(model, model_layer_id, img_array, show_info=True):
    """
    This function returns the activation object for the give layer id along with image to be classified
    :param model:
    :param model_layer_id:
    :param img_array:
    :return:
    """
    assert isinstance(model, keras.engine.training.Model)
    assert model_layer_id >= 0
    assert model_layer_id <= len(model.layers)
    #assert img_array
    utils.helper_functions.show_print_message(
        "Now getting activation object for the selected layer " + str(model_layer_id) + " from the given model", show_info)

    activation_temp = K.function([model.layers[0].input, K.learning_phase()],[model.layers[model_layer_id].output,])
    utils.helper_functions.show_print_message(
        "Activation object is collected successfully", show_info)
    return activation_temp([img_array, 0])
Project: deeputil    Author: Avkash    | Project source | File source
def get_model_layer_feature_map_counts(model, model_layer_id, img_array, show_info=True):
    """
    :param model:
    :param model_layer_id:
    :param img_array:
    :return:
    """
    assert isinstance(model, keras.engine.training.Model)
    assert model_layer_id >= -1
    result = 0
    utils.helper_functions.show_print_message(
        "Now collecting feature map for the selected layer in the given model..", show_info)

    activation_temp = K.function([model.layers[0].input, K.learning_phase()], [model.layers[model_layer_id].output, ])
    activationObj = activation_temp([img_array, 0])
    try:
        if (np.shape(activationObj[0][0][0]))[1]:
            result = (np.shape(activationObj[0][0][0]))[1]
    except IndexError:
        utils.helper_functions.show_print_message(
            "Error: Unable to get feature map from the selected layer in this model..", show_info)

    return result
Project: DQN    Author: jjakimoto    | Project source | File source
def predict_action(self, state):
        """Preduct Optimal Portfolio

        Args:
            state(float): stock data with size: [self.n_stock, ]
        Return:
            np.array with size: [self.n_stock, ]
        """
        pred_state = self.memory[0].sample_state_uniform(self.n_batch, self.n_history)
        new_state = pred_state[-1]
        new_state = np.concatenate((new_state[1:], [state]), axis=0)
        pred_state = np.concatenate((pred_state[:-1], [new_state]), axis=0)
        action = self.actor_output.eval(
            session=self.sess,
            feed_dict={self.state: pred_state, K.learning_phase(): 0})[-1]
        # action = self.norm_action(action)
        return action
Project: DQN    Author: jjakimoto    | Project source | File source
def update_memory(self, state, state_forward):
        # update memory without updating weight
        for i in range(self.n_memory):
            self.memory[i].observations.append(state)
            self.memory[i].priority.append(1.0)
        # to stabilize batch normalization, use other samples for prediction
        pred_state = self.memory[0].sample_state_uniform(self.n_batch, self.n_history)
        # off policy action and update portfolio
        actor_action = self.actor_output.eval(session=self.sess,
                                      feed_dict={self.state: pred_state,
                                                          K.learning_phase(): 0})[-1]
        action_scale = np.mean(np.abs(actor_action))
        # action_off = np.round(actor_value_off + np.random.normal(0, noise_scale, self.n_stock))
        for i in range(self.n_memory):
            action_off = actor_action + np.random.normal(0, action_scale * self.noise_scale, self.n_stock)
            action_off = self.norm_action(action_off)
            # action_off = actor_value_off
            reward_off = reward = np.sum((state_forward - state) * action_off)
            self.memory[i].rewards.append(reward_off)
            self.memory[i].actions.append(action_off)
Project: DQN    Author: jjakimoto    | Project source | File source
def predict_action(self, state):
        """Preduct Optimal strategy

        Args:
            state(float): stock data with size: [self.n_stock, ]
        Return:
            integer: 0-exit, 1-stay
        """
        pred_state = self.memory[0].sample_state_uniform(self.n_batch, self.n_history)
        new_state = pred_state[-1]
        new_state = np.concatenate((new_state[1:], [state]), axis=0)
        pred_state = np.concatenate((pred_state[:-1], [new_state]), axis=0)
        action = self.max_action.eval(
            session=self.sess,
            feed_dict={self.state: pred_state, K.learning_phase(): 0})[-1]
        return action
Project: python-alp    Author: tboquet    | Project source | File source
def build_predict_func(mod):
    """Build Keras prediction functions based on a Keras model

    Using inputs and outputs of the graph a prediction function
    (forward pass) is compiled for prediction purpose.

    Args:
        mod(keras.models): a Model or Sequential model

    Returns:
        a Keras (Theano or Tensorflow) function
    """
    import keras.backend as K
    if mod.uses_learning_phase:
        tensors = mod.inputs + [K.learning_phase()]
    else:
        tensors = mod.inputs
    return K.function(tensors, mod.outputs, updates=mod.state_updates)
Project: rl-teacher    Author: nottombrown    | Project source | File source
def train_predictor(self):
        self.comparison_collector.label_unlabeled_comparisons()

        minibatch_size = min(64, len(self.comparison_collector.labeled_decisive_comparisons))
        labeled_comparisons = random.sample(self.comparison_collector.labeled_decisive_comparisons, minibatch_size)
        left_obs = np.asarray([comp['left']['obs'] for comp in labeled_comparisons])
        left_acts = np.asarray([comp['left']['actions'] for comp in labeled_comparisons])
        right_obs = np.asarray([comp['right']['obs'] for comp in labeled_comparisons])
        right_acts = np.asarray([comp['right']['actions'] for comp in labeled_comparisons])
        labels = np.asarray([comp['label'] for comp in labeled_comparisons])

        with self.graph.as_default():
            _, loss = self.sess.run([self.train_op, self.loss_op], feed_dict={
                self.segment_obs_placeholder: left_obs,
                self.segment_act_placeholder: left_acts,
                self.segment_alt_obs_placeholder: right_obs,
                self.segment_alt_act_placeholder: right_acts,
                self.labels: labels,
                K.learning_phase(): True
            })
            self._elapsed_predictor_training_iters += 1
            self._write_training_summaries(loss)
Project: rl-teacher    Author: nottombrown    | Project source | File source
def _write_training_summaries(self, loss):
        self.agent_logger.log_simple("predictor/loss", loss)

        # Calculate correlation between true and predicted reward by running validation on recent episodes
        recent_paths = self.agent_logger.get_recent_paths_with_padding()
        if len(recent_paths) > 1 and self.agent_logger.summary_step % 10 == 0:  # Run validation every 10 iters
            validation_obs = np.asarray([path["obs"] for path in recent_paths])
            validation_acts = np.asarray([path["actions"] for path in recent_paths])
            q_value = self.sess.run(self.q_value, feed_dict={
                self.segment_obs_placeholder: validation_obs,
                self.segment_act_placeholder: validation_acts,
                K.learning_phase(): False
            })
            ep_reward_pred = np.sum(q_value, axis=1)
            reward_true = np.asarray([path['original_rewards'] for path in recent_paths])
            ep_reward_true = np.sum(reward_true, axis=1)
            self.agent_logger.log_simple("predictor/correlations", corrcoef(ep_reward_true, ep_reward_pred))

        self.agent_logger.log_simple("predictor/num_training_iters", self._elapsed_predictor_training_iters)
        self.agent_logger.log_simple("labels/desired_labels", self.label_schedule.n_desired_labels)
        self.agent_logger.log_simple("labels/total_comparisons", len(self.comparison_collector))
        self.agent_logger.log_simple(
            "labels/labeled_comparisons", len(self.comparison_collector.labeled_decisive_comparisons))
Project: face-identification-tpe    Author: meownoid    | Project source | File source
def predict(self, data_x, batch_size=32, learning_phase=False):
        n_data = len(data_x)
        n_batches = n_data // batch_size + (0 if n_data % batch_size == 0 else 1)

        result = None

        learning_phase = 1 if learning_phase else 0

        for i in range(n_batches):
            batch_x = data_x[i * batch_size:(i + 1) * batch_size]
            batch_y = self.fn([batch_x, learning_phase])[0]

            if result is None:
                result = batch_y
            else:
                result = np.vstack([result, batch_y])

        return result
Project: value_gradient    Author: rarilurelo    | Project source | File source
def optimize_q(self, batch, epoch):
        if not self.built:
            self.build()
        if self.use_pi(epoch):
            next_v = self.sess.run(self.target_v_from_pi, {self.states: batch['next_states'], K.learning_phase(): 1})
        else:
            next_v = self.sess.run(self.target_v_from_uniform, {self.states: batch['next_states'], K.learning_phase(): 1})
        ys = batch['rewards'] + self.gamma * (1 - batch['terminals']) * next_v
        feed_in = {
                self.states: batch['states'],
                self.actions: batch['actions'],
                self.rewards: batch['rewards'],
                self.ys: ys,
                K.learning_phase(): 1
                }
        self.sess.run(self.q_updater, feed_in)
Project: bnn-analysis    Author: myshkov    | Project source | File source
def _sample_predictive(self, test_x=None, return_stats=False, **kwargs):
        """ Draws a new sample from the model. """
        if self._sample_predictive_fn is None:
            self._sample_predictive_fn = K.function([self.model.layers[0].input, K.learning_phase()],
                                                    [self.model.layers[-1].output])

        sample = self._sample_predictive_fn([test_x, 1])

        stats = None
        if return_stats:
            stats = SampleStats(time=self._running_time())

        return sample, [stats]
Project: detection-2016-nipsws    Author: imatge-upc    | Project source | File source
def get_feature_map_8(model, im):
    im = im.astype(np.float32)
    dim_ordering = K.image_dim_ordering()
    if dim_ordering == 'th':
        # 'RGB'->'BGR'
        im = im[::-1, :, :]
        # Zero-center by mean pixel
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        im = im[:, :, ::-1]
        # Zero-center by mean pixel
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, model.outputs)
    feature_map = _convout1_f([0] + [im])
    feature_map = np.array([feature_map])
    feature_map = feature_map[0, 0, 0, :, :, :]
    return feature_map


# get shallower feature map
Project: DeepWorks    Author: daigo0927    | Project source | File source
def train(self, batch_size = 128, num_epochs = 20):
        data_size = self.x_train.shape[0]
        num_batches = int(data_size/batch_size)

        for e in np.arange(num_epochs):
            # shuffle training data index
            permute_idx = np.random.permutation(np.arange(data_size))

            for b in np.arange(num_batches):

                # get data batch
                x_batch = self.x_train[permute_idx[b*batch_size:(b+1)*batch_size]]
                y_batch = self.y_train[permute_idx[b*batch_size:(b+1)*batch_size]]

                self.sess.run(self.opt,
                              feed_dict = {self.images:x_batch, self.labels:y_batch,
                                           K.learning_phase(): 1})

                if b%100 == 0:
                    acc = self.sess.run(self.accuracy,
                                        feed_dict = {self.images:x_batch, self.labels:y_batch,
                                                     K.learning_phase(): 1})
                    print('training epoch : {}, batch : {}, accuracy : {}'.format(e, b, acc))

            self.valid()
            self.model.save_weights('./weights_{}.h5'.format(e))
Project: DeepWorks    Author: daigo0927    | Project source | File source
def valid(self, weights_file = None):
        if weights_file is not None:
            self.model.load_weights(weights_file)

        val_idx = np.random.randint(self.x_test.shape[0], size = 128)
        x_val = self.x_test[val_idx]
        y_val = self.y_test[val_idx]
        acc_val = self.sess.run(self.accuracy,
                                feed_dict = {self.images:x_val, self.labels:y_val,
                                             K.learning_phase(): 1})
        print('validation accuracy : {}'.format(acc_val))
Project: segmentation_DLMI    Author: imatge-upc    | Project source | File source
def get_activations(model, layer_name,input_img):
    layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
    get_activations = K.function([model.layers[0].input, K.learning_phase()], layer_dict[layer_name].output)
    activations = get_activations([input_img,0])
    return activations
Project: keras-utilities    Author: cbaziotis    | Project source | File source
def get_activations(model, layer, X_batch):
        get_activations = K.function(
            [model.layers[0].input, K.learning_phase()],
            model.layers[layer].output)
        activations = get_activations([X_batch, 0])
        return activations

    #
Project: keras-utilities    Author: cbaziotis    | Project source | File source
def get_input_mask(model, layer, X_batch):
        get_input_mask = K.function([model.layers[0].input, K.learning_phase()],
                                    model.layers[layer].input_mask)
        input_mask = get_input_mask([X_batch, 0])
        return input_mask
Project: keras-utilities    Author: cbaziotis    | Project source | File source
def get_output_mask(model, layer, X_batch):
        get_output_mask = K.function(
            [model.layers[0].input, K.learning_phase()],
            model.layers[layer].output_mask)
        output_mask = get_output_mask([X_batch, 0])
        return output_mask
Project: keras-utilities    Author: cbaziotis    | Project source | File source
def get_input(model, layer, X_batch):
        get_input = K.function([model.layers[0].input, K.learning_phase()],
                               model.layers[layer].input)
        _input = get_input([X_batch, 0])
        return _input
Project: speechless    Author: JuliusKunze    | Project source | File source
def prediction_batch(self, input_batch: ndarray) -> ndarray:
        """Predicts a grapheme probability batch given a spectrogram batch, employing the learned predictive network."""
        # Indicates to use prediction phase in order to disable dropout (see backend.learning_phase documentation):
        return self.get_prediction_batch([input_batch, self.prediction_phase_flag])[0]
Project: speechless    Author: JuliusKunze    | Project source | File source
def get_prediction_batch(self):
        return backend.function(self.predictive_net.inputs + [backend.learning_phase()], self.predictive_net.outputs)
Project: speechless    Author: JuliusKunze    | Project source | File source
def get_predicted_graphemes_and_loss_batch(self):
        return backend.function(self.loss_net.inputs + [backend.learning_phase()],
                                [single(self.decoding_net.outputs), single(self.loss_net.outputs)])
Project: siamese_sentiment    Author: jcavalieri8619    | Project source | File source
def get_network_layer_output(model, dataInput, layerNum, **kwargs):
    """

    :param model:
    :param dataInput:
    :param layerNum:
    :param kwargs:
    :return:
    """
    get_output = K.function([model.layers[0].input, K.learning_phase()],
                            [model.layers[layerNum].output])

    phase = kwargs.get('phase', None)

    if phase is None or phase == 'test':
        # output in test mode = 0
        layer_output = get_output([dataInput, 0])[0]

    elif phase == 'train':
        # output in train mode = 1
        layer_output = get_output([dataInput, 1])[0]

    else:
        raise RuntimeError("invalid phase passed to get_network_layer_output")

    return layer_output
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | Project source | File source
def gradients(self, states, actions):
        return self.sess.run(self.action_grads, feed_dict={
            self.state: states,
            self.action: actions,
            K.learning_phase(): 1
        })[0]
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | Project source | File source
def gradients(self, states, actions):
        return self.sess.run(self.action_grads, feed_dict={
            self.state: states,
            self.action: actions,
            K.learning_phase(): 1
        })[0]
Project: Multi-Agent_SelfDriving    Author: MLJejuCamp2017    | Project source | File source
def train(self, states, action_grads):
        self.sess.run(self.optimize, feed_dict={
            self.state: states,
            self.action_gradient: action_grads,
            K.learning_phase(): 1
        })
Project: keras    Author: GeekLiB    | Project source | File source
def on_epoch_end(self, epoch, logs={}):
        import tensorflow as tf

        if self.model.validation_data and self.histogram_freq:
            if epoch % self.histogram_freq == 0:
                # TODO: implement batched calls to sess.run
                # (current call will likely go OOM on GPU)
                if self.model.uses_learning_phase:
                    cut_v_data = len(self.model.inputs)
                    val_data = self.model.validation_data[:cut_v_data] + [0]
                    tensors = self.model.inputs + [K.learning_phase()]
                else:
                    val_data = self.model.validation_data
                    tensors = self.model.inputs
                feed_dict = dict(zip(tensors, val_data))
                result = self.sess.run([self.merged], feed_dict=feed_dict)
                summary_str = result[0]
                self.writer.add_summary(summary_str, epoch)

        for name, value in logs.items():
            if name in ['batch', 'size']:
                continue
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.writer.add_summary(summary, epoch)
        self.writer.flush()
Project: keras    Author: GeekLiB    | Project source | File source
def test_learning_phase():
    a = Input(shape=(32,), name='input_a')
    b = Input(shape=(32,), name='input_b')

    a_2 = Dense(16, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    assert dp.uses_learning_phase

    assert not a_2._uses_learning_phase
    assert b_2._uses_learning_phase

    # test merge
    m = merge([a_2, b_2], mode='concat')
    assert m._uses_learning_phase

    # Test recursion
    model = Model([a, b], [a_2, b_2])
    print(model.input_spec)
    assert model.uses_learning_phase

    c = Input(shape=(32,), name='input_c')
    d = Input(shape=(32,), name='input_d')

    c_2, b_2 = model([c, d])
    assert c_2._uses_learning_phase
    assert b_2._uses_learning_phase

    # try actually running graph
    fn = K.function(model.inputs + [K.learning_phase()], model.outputs)
    input_a_np = np.random.random((10, 32))
    input_b_np = np.random.random((10, 32))
    fn_outputs_no_dp = fn([input_a_np, input_b_np, 0])
    fn_outputs_dp = fn([input_a_np, input_b_np, 1])
    # output a: nothing changes
    assert fn_outputs_no_dp[0].sum() == fn_outputs_dp[0].sum()
    # output b: dropout applied
    assert fn_outputs_no_dp[1].sum() != fn_outputs_dp[1].sum()
Project: keras-contrib    Author: farizrahman4u    | Project source | File source
def get_relu_activations(self):
        model_input = self.model.input
        is_multi_input = isinstance(model_input, list)
        if not is_multi_input:
            model_input = [model_input]

        funcs = [K.function(model_input + [K.learning_phase()], [layer.output]) for layer in self.model.layers]
        if is_multi_input:
            list_inputs = []
            list_inputs.extend(self.x_train)
            list_inputs.append(1.)
        else:
            list_inputs = [self.x_train, 1.]

        layer_outputs = [func(list_inputs)[0] for func in funcs]
        for layer_index, layer_activations in enumerate(layer_outputs):
            if self.is_relu_layer(self.model.layers[layer_index]):
                layer_name = self.model.layers[layer_index].name
                # layer_weight is a list [W] (+ [b])
                layer_weight = self.model.layers[layer_index].get_weights()
                # with kernel and bias, the weights are saved as a list [W, b]. If only weights, it is [W]
                if type(layer_weight) is not list:
                    raise ValueError("'Layer_weight' should be a list, but was {}".format(type(layer_weight)))

                layer_weight_shape = np.shape(layer_weight[0])
                yield [layer_index, layer_activations, layer_name, layer_weight_shape]
Project: keras-visualize-activations    Author: philipperemy    | Project source | File source
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    print('----- activations -----')
    activations = []
    inp = model.input

    model_multi_inputs_cond = True
    if not isinstance(inp, list):
        # only one input! let's wrap it in a list.
        inp = [inp]
        model_multi_inputs_cond = False

    outputs = [layer.output for layer in model.layers if
               layer.name == layer_name or layer_name is None]  # all layer outputs

    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions

    if model_multi_inputs_cond:
        list_inputs = []
        list_inputs.extend(model_inputs)
        list_inputs.append(0.)
    else:
        list_inputs = [model_inputs, 0.]

    # Learning phase. 0 = Test mode (no dropout or batch normalization)
    # layer_outputs = [func([model_inputs, 0.])[0] for func in funcs]
    layer_outputs = [func(list_inputs)[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
Project: auckland-ai-meetup-x-triage    Author: a-i-joe    | Project source | File source
def get_saliency(image,model):
    """Returns a saliency map with same shape as image. """
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image,0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss,model.input)[0])
    saliency = K.max(grads,axis=3)
    fetch_saliency = K.function([model.input,K.learning_phase()],[loss,saliency])
    outputs, saliency = fetch_saliency([image,0])
    K.set_learning_phase(True)
    return saliency
Project: gandlf    Author: codekansas    | Project source | File source
def _get_learning_phase(self):
        if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
            return [K.learning_phase()]
        else:
            return []
Project: neuroblast    Author: ActiveState    | Project source | File source
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    activations = []
    inp = model.input

    model_multi_inputs_cond = True
    if not isinstance(inp, list):
        # only one input! let's wrap it in a list.
        inp = [inp]
        model_multi_inputs_cond = False

    outputs = [layer.output for layer in model.layers if
               layer.name == layer_name or layer_name is None]  # all layer outputs

    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions

    if model_multi_inputs_cond:
        list_inputs = []
        list_inputs.extend(model_inputs)
        list_inputs.append(1.)
    else:
        list_inputs = [model_inputs, 1.]

    # Learning phase. 1 = Train mode (dropout and batch normalization active)
    # layer_outputs = [func([model_inputs, 1.])[0] for func in funcs]
    layer_outputs = [func(list_inputs)[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
    return activations


## Global vars used for vizmodel == 1