Python keras.backend module: categorical_crossentropy() example source code

We have extracted the following 32 code examples from open-source Python projects to illustrate how to use keras.backend.categorical_crossentropy().
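
All of the snippets below appear to target the Keras 1.x backend API, in which the signature is K.categorical_crossentropy(output, target, from_logits=False): predictions come first and targets second. (Keras 2 later reversed the argument order to (target, output), so these examples need their first two arguments swapped on current versions.) A minimal sketch of the Keras 1.x usage; the tensor values are illustrative and not taken from any project below:

import numpy as np
import keras.backend as K

# Three samples over four classes; each row of y_pred sums to 1.
y_true = K.variable(np.eye(4)[[0, 2, 1]])        # one-hot targets
y_pred = K.variable(np.array([[0.7, 0.1, 0.1, 0.1],
                              [0.1, 0.1, 0.7, 0.1],
                              [0.1, 0.7, 0.1, 0.1]]))

loss = K.categorical_crossentropy(y_pred, y_true)  # Keras 1.x argument order
print(K.eval(loss))  # per-sample losses, shape (3,): each is -log(0.7) ~= 0.357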

Project: kaos    Author: RuiShu    | Project source | File source
def _define_io_loss_xy(self):
        u, p, q, s = {}, {}, {}, {}
        x, y = Input(shape=(784,)), Input(shape=(10,))
        q['z'], s['z'], p['x'] = self.xy_graph(x, y)
        u['x'] = self.u_net['x'](x)
        q['y'] = self.q_net['y'](u['x'])

        def alpha_loss(y, y_param):
            return K.categorical_crossentropy(q['y'], y)

        def xy_loss(x, x_param):
            return self.labeled_loss(x, q['z'], s['z'], p['x'])

        self._predict = K.function([x, K.learning_phase()], q['y'])
        return self._standardize_io_loss([x, y],
                                         [q['y'], p['x']],
                                         [alpha_loss, xy_loss])
Project: ensemble-adv-training    Author: ftramer    | Project source | File source
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """

    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(logits, y, from_logits=True)
    elif loss == 'logloss':
        out = K.categorical_crossentropy(logits, y, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = K.mean(out)
    else:
        out = K.sum(out)
    return out
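
A hedged sketch of how gen_adv_loss might feed an FGSM-style attack; model, x, y, and eps are illustrative names that are not part of the original snippet:

# Hypothetical usage; assumes `model` maps inputs to pre-softmax logits.
logits = model(x)
adv_loss = gen_adv_loss(logits, y, loss='logloss', mean=True)
grad = K.gradients(adv_loss, [x])[0]             # gradient of the loss w.r.t. the input
x_adv = K.stop_gradient(x + eps * K.sign(grad))  # step in the ascent direction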
Project: tying-wv-and-wc    Author: icoxfog417    | Project source | File source
def augmented_loss(self, y_true, y_pred):
        _y_pred = Activation("softmax")(y_pred)
        loss = K.categorical_crossentropy(_y_pred, y_true)

        # y is (batch x seq x vocab)
        y_indexes = K.argmax(y_true, axis=2)  # turn one hot to index. (batch x seq)
        y_vectors = self.embedding(y_indexes)  # lookup the vector (batch x seq x vector_length)

        #v_length = self.setting.vector_length
        #y_vectors = K.reshape(y_vectors, (-1, v_length))
        #y_t = K.map_fn(lambda v: K.dot(self.embedding.embeddings, K.reshape(v, (-1, 1))), y_vectors)
        #y_t = K.squeeze(y_t, axis=2)  # unknown but necessary operation
        #y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))

        # vector x embedding dot products (batch x seq x vocab)
        y_t = tf.tensordot(y_vectors, K.transpose(self.embedding.embeddings), 1)
        y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))  # explicitly set shape
        y_t = K.softmax(y_t / self.temperature)
        _y_pred_t = Activation("softmax")(y_pred / self.temperature)
        aug_loss = kullback_leibler_divergence(y_t, _y_pred_t)
        loss += (self.gamma * self.temperature) * aug_loss
        return loss
Project: python-alp    Author: tboquet    | Project source | File source
def test_experiment_instance_utils(self, get_model):
        new_session()
        model = get_model()

        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        expe = Experiment(model)
        expe.model_dict = model
        expe.backend_name = 'another_backend'
        expe.model_dict = model

        assert expe.backend is not None
        expe = Experiment()

        print(self)
Project: python-alp    Author: tboquet    | Project source | File source
def test_build_predict_func(self, get_model):
        """Test the build of a model"""
        new_session()
        X_tr = np.ones((train_samples, input_dim))
        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_name = model.__class__.__name__

        pred_func = KTB.build_predict_func(model)

        tensors = [X_tr]
        if model_name != 'Model':
            tensors.append(1.)

        res = pred_func(tensors)

        assert len(res[0]) == len(X_tr)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: python-alp    Author: tboquet    | Project source | File source
def test_fit(self, get_model):
        "Test the training of a serialized model"
        new_session()
        data, data_val = make_data(train_samples, test_samples)

        model = get_model()
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])

        model_dict = dict()
        model_dict['model_arch'] = to_dict_w_opt(model)

        res = KTB.train(copy.deepcopy(model_dict['model_arch']), [data],
                        [data_val], [])
        res = KTB.fit(NAME, VERSION, model_dict, [data], 'test', [data_val],
                      [])

        assert len(res) == 4

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: SNLI-Keras    Author: adamzjk    | Project source | File source
def w_categorical_crossentropy(y_true, y_pred):
  weights = np.array([[1., 5.],   # misclassify N -> Y
                      [10., 1.]]) # misclassify Y -> N
  nb_cl = len(weights)
  final_mask = K.zeros_like(y_pred[:, 0])
  y_pred_max = K.max(y_pred, axis=1)
  y_pred_max = K.expand_dims(y_pred_max, 1)
  y_pred_max_mat = K.equal(y_pred, y_pred_max)
  for c_p, c_t in product(range(nb_cl), range(nb_cl)):
    final_mask += (K.cast(weights[c_t, c_p], K.floatx()) *
                   K.cast(y_pred_max_mat[:, c_p], K.floatx()) *
                   K.cast(y_true[:, c_t], K.floatx()))
  return K.categorical_crossentropy(y_pred, y_true) * final_mask
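
A quick hedged check of the weighting with illustrative values: for a sample whose true class is Y (index 1) but whose argmax prediction is N (index 0), the mask picks weights[1, 0] = 10, scaling that sample's cross-entropy tenfold:

import numpy as np
import keras.backend as K
from itertools import product  # required by w_categorical_crossentropy

y_true = K.variable(np.array([[0., 1.]]))    # true class: Y
y_pred = K.variable(np.array([[0.8, 0.2]]))  # predicted class: N
print(K.eval(w_categorical_crossentropy(y_true, y_pred)))
# -> [~16.09], i.e. -log(0.2) * 10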
Project: keras    Author: GeekLiB    | Project source | File source
def test_check_not_failing():
    a = np.random.random((2, 1, 3))
    check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])
    check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, None, 3)])
Project: keras    Author: GeekLiB    | Project source | File source
def test_check_last_is_one():
    a = np.random.random((2, 3, 1))
    with pytest.raises(Exception) as exc:
        check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])

    assert "You are passing a target array" in str(exc)
Project: keras    Author: GeekLiB    | Project source | File source
def test_check_bad_shape():
    a = np.random.random((2, 3, 5))
    with pytest.raises(Exception) as exc:
        check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, 3, 6)])

    assert "targets to have the same shape" in str(exc)
Project: NetworkCompress    Author: luzai    | Project source | File source
def kd_loss(y_true, y_pred):
    return K.categorical_crossentropy(y_pred, y_true, from_logits=True)


# 1. get transfer data and test data
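
A hedged sketch of the knowledge-distillation setup this loss implies; teacher_model, student_model, and X are illustrative names, not part of the snippet. Because from_logits=True, the student's final layer is assumed to be linear so that its raw outputs act as logits:

# Hypothetical distillation wiring.
soft_targets = teacher_model.predict(X)      # teacher's soft class probabilities
student_model.compile(optimizer='rmsprop', loss=kd_loss)
student_model.fit(X, soft_targets)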
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def categoricalCrossentropyWithWeights(ytrueWithWeights, ypred):
    '''Like regular categorical cross-entropy, but with a sample weight for every row.
    ytrueWithWeights is a matrix whose first columns are a one-hot encoding of the
    classes, and whose last column contains the sample weights.
    '''
    return K.categorical_crossentropy(ypred, ytrueWithWeights[:, :-1]) * ytrueWithWeights[:, -1]
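
A hedged sketch of packing the per-row weights into the target matrix before training; the values and the commented-out model calls are illustrative:

import numpy as np

y_onehot = np.eye(3)[[0, 2, 1]]                  # (3, 3) one-hot labels
weights = np.array([[1.0], [0.5], [2.0]])        # (3, 1) per-sample weights
y_with_weights = np.hstack([y_onehot, weights])  # (3, 4): labels, then weight
# model.compile(optimizer='rmsprop', loss=categoricalCrossentropyWithWeights)
# model.fit(X, y_with_weights)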
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def entropyLoss(ypred):
    '''Entropy loss.
    Loss = - sum(pred * log(pred))
    '''
    return K.categorical_crossentropy(ypred, ypred)
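
Passing the same tensor as both prediction and target turns the cross-entropy into the entropy of the distribution, -sum(p * log(p)). A quick hedged check with illustrative values:

import numpy as np
import keras.backend as K

p = K.variable(np.array([[0.7, 0.2, 0.1]]))
print(K.eval(entropyLoss(p)))  # -> [~0.8018]
print(-(0.7*np.log(0.7) + 0.2*np.log(0.2) + 0.1*np.log(0.1)))  # same value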
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def setupModel(self):
        '''Setup models:
        self.actionModel is the action predictions.
        self.valueModel is the prediction of the value function.
        self.model is the model with both outputs
        '''
        if self.resume:
            self.model = load_model(self.modelFileName)
            # Need the other models as well...
            return
        inputShape = (self.D, self.D, self.nbImgInState)
        model = self.deepMindAtariNet(self.nbClasses, inputShape, includeTop=False)
        inp = Input(shape=inputShape)
        x = model(inp)
        x = Flatten()(x)
        x = Dense(512, activation='relu', name='dense1')(x)

        action = Dense(self.nbClasses, activation='softmax', name='action')(x)
        self.actionModel = Model(inp, action)
        # Should we compile model?

        value = Dense(1, activation='linear', name='value')(x)
        self.valueModel = Model(inp, value)
        # Should we compile model?

        self.model = Model(inp, [action, value])
        # loss = {'action': 'categorical_crossentropy', 'value': 'mse'}
        # loss = {'action': categoricalCrossentropyWithWeights, 'value': 'mse'}
        actionAndEntropyLoss = makeActionAndEntropyLossA3C(self.entropyBeta)
        loss = {'action': actionAndEntropyLoss, 'value': 'mse'}
        loss_weights = {'action': 1, 'value': self.mseBeta}
        optim = RMSprop(self.learningRate, self.decayRate)
        self.model.compile(optim, loss) # Need to make it possible to set other optimizers
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def categoricalCrossentropyWithWeights(ytrueWithWeights, ypred):
    '''Like regular categorical cross-entropy, but with a sample weight for every row.
    ytrueWithWeights is a matrix whose first columns are a one-hot encoding of the
    classes, and whose last column contains the sample weights.
    '''
    return K.categorical_crossentropy(ypred, ytrueWithWeights[:, :-1]) * ytrueWithWeights[:, -1]
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def entropyLoss(ypred):
    '''Entropy loss.
    Loss = - sum(pred * log(pred))
    '''
    return K.categorical_crossentropy(ypred, ypred)
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def entropyLoss(ypred):
    '''Entropy loss.
    Loss = - sum(pred * log(pred))
    '''
    return K.categorical_crossentropy(ypred, ypred)
Project: keras-customized    Author: ambrite    | Project source | File source
def test_check_not_failing():
    a = np.random.random((2, 1, 3))
    check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])
    check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, None, 3)])
Project: keras-customized    Author: ambrite    | Project source | File source
def test_check_last_is_one():
    a = np.random.random((2, 3, 1))
    with pytest.raises(Exception) as exc:
        check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])

    assert "You are passing a target array" in str(exc)
Project: keras-customized    Author: ambrite    | Project source | File source
def test_check_bad_shape():
    a = np.random.random((2, 3, 5))
    with pytest.raises(Exception) as exc:
        check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, 3, 6)])

    assert "targets to have the same shape" in str(exc)
Project: kaos    Author: RuiShu    | Project source | File source
def _define_io_loss_x(self):
        u, p, q, s = {}, {}, {}, {}
        x, y = Input(shape=(784,)), Input(shape=(10,))
        u['x'] = self.u_net['x'](x)
        q['y'] = self.q_net['y'](u['x'])

        # create one-hots
        s['y'] = []
        for i in xrange(10):
            l = Lambda(lambda x: x + K.variable(np.eye(10)[i]), (10,))
            s['y'] += [l(y)]

        q['z'], s['z'], p['x'] = [], [], []
        for i in xrange(10):
            a, b, c = self.xy_graph(x, s['y'][i])
            q['z'] += [a]
            s['z'] += [b]
            p['x'] += [c]

        def x_loss(x, x_param):
            loss = -K.categorical_crossentropy(q['y'], q['y'])
            for i in xrange(10):
                loss += q['y'][:, i] * self.labeled_loss(x, q['z'][i], s['z'][i], p['x'][i])
            return loss

        return self._standardize_io_loss([x, y], p['x'][0], x_loss)
Project: neural-segmentation    Author: melsner    | Project source | File source
def masked_categorical_crossentropy(y_true, y_pred):
    # Timesteps whose one-hot target is all zeros are padding: mask them out.
    mask = K.cast(K.expand_dims(K.any(y_true, -1), axis=-1), 'float32')
    y_pred *= mask
    y_pred += 1 - mask  # set padded predictions to 1 so their log term vanishes
    losses = K.categorical_crossentropy(y_pred, y_true)
    losses *= K.squeeze(mask, -1)
    ## Normalize by number of real segments, using a small non-zero denominator in cases of padding characters
    ## in order to avoid division by zero
    #losses /= (K.mean(mask) + (1e-10*(1-K.mean(mask))))
    return losses
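
A hedged check with one real row and one all-zero padding row; the values are illustrative, and the padded row contributes exactly zero loss:

import numpy as np
import keras.backend as K

y_true = K.variable(np.array([[1., 0., 0.],
                              [0., 0., 0.]]))    # second row is padding
y_pred = K.variable(np.array([[0.9, 0.05, 0.05],
                              [0.3, 0.3, 0.4]]))
print(K.eval(masked_categorical_crossentropy(y_true, y_pred)))
# -> [~0.105, 0.0]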
Project: keras    Author: NVIDIA    | Project source | File source
def test_check_not_failing():
    a = np.random.random((2, 1, 3))
    check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])
    check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, None, 3)])
Project: keras    Author: NVIDIA    | Project source | File source
def test_check_last_is_one():
    a = np.random.random((2, 3, 1))
    with pytest.raises(Exception) as exc:
        check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [a.shape])

    assert "You are passing a target array" in str(exc)
Project: keras    Author: NVIDIA    | Project source | File source
def test_check_bad_shape():
    a = np.random.random((2, 3, 5))
    with pytest.raises(Exception) as exc:
        check_loss_and_target_compatibility([a], [K.categorical_crossentropy], [(2, 3, 6)])

    assert "targets to have the same shape" in str(exc)
Project: PaintingToArtists    Author: achintyagopal    | Project source | File source
def my_objective_function(y_true, y_pred):
    # use categorical cross entropy
    # basically normal cross entropy (> 0 -> 1, < 0 -> 0), multiplied by y_true
    #return y_true * K.categorical_crossentropy(y_pred, (K.sign(y_true) + 1) / 2)
    return K.abs(y_true[0][0]) * K.categorical_crossentropy(y_pred, K.sign(y_true) + 1) / 2.0
Project: python-alp    Author: tboquet    | Project source | File source
def get_loss():
    def return_loss():
        import keras.backend as K
        def cat_cross(y_true, y_pred):
            '''A test of a custom loss function
            '''
            return K.categorical_crossentropy(y_pred, y_true)
        return cat_cross
    return return_loss
Project: python-alp    Author: tboquet    | Project source | File source
def get_loss_metric(self, request):
        if request.param == 'classic':
            return 'categorical_crossentropy', 'accuracy'
        elif request.param == 'custom':
            return get_loss(), get_metric()
        elif request.param == 'list':
            return [get_loss()], get_metric()
        print(self)
Project: python-alp    Author: tboquet    | Project source | File source
def test_deserialization(self):
        new_session()
        model = sequential()
        model.compile(optimizer='sgd', loss='categorical_crossentropy')
        ser_mod = to_dict_w_opt(model)
        custom_objects = {'test_loss': [1, 2]}
        custom_objects = {k: serialize(custom_objects[k])
                          for k in custom_objects}
        model_from_dict_w_opt(ser_mod, custom_objects=custom_objects)

        if K.backend() == 'tensorflow':
            K.clear_session()

        print(self)
Project: single_shot_multibox_detector    Author: oarriaga    | Project source | File source
def compute_loss(self, y_true, y_pred):

        class_loss = self.cross_entropy(y_true[:, :, 4:], y_pred[:, :, 4:])
        """
        class_loss = K.categorical_crossentropy(y_true[:, :, 4:],
                                                y_pred[:, :, 4:])
        """
        # return K.concatenate([class_loss, class_loss_old], axis=0)
        local_loss = self.smooth_l1(y_true[:, :, :4], y_pred[:, :, :4])
        negative_mask = y_true[:, :, 4 + self.background_id]
        positive_mask = 1 - negative_mask

        # calculating the positive loss
        positive_local_losses = local_loss * positive_mask
        positive_class_losses = class_loss * positive_mask
        positive_class_loss = K.sum(positive_class_losses, axis=-1)
        positive_local_loss = K.sum(positive_local_losses, axis=-1)

        # obtaining the number of negatives in the batch
        num_positives_per_sample = K.cast(K.sum(positive_mask, -1), 'int32')
        num_negatives_per_sample = K.cast(K.sum(negative_mask, -1), 'int32')
        num_negatives_in_batch = K.sum(num_negatives_per_sample)
        num_hard_negatives = self.neg_pos_ratio * num_positives_per_sample
        num_negatives = K.minimum(num_hard_negatives, num_negatives_in_batch)
        all_negative_class_losses = class_loss * negative_mask

        negative_class_loss = []
        for batch_arg in range(self.batch_size):
            sample_num_negatives = num_negatives[batch_arg]
            all_negative_sample_loss = all_negative_class_losses[batch_arg]
            negative_sample_losses = tf.nn.top_k(all_negative_sample_loss,
                                                 k=sample_num_negatives,
                                                 sorted=True)[0]
            negative_sample_loss = K.sum(negative_sample_losses)
            negative_sample_loss = K.expand_dims(negative_sample_loss, -1)
            negative_class_loss.append(negative_sample_loss)
        negative_class_loss = K.concatenate(negative_class_loss)
        # return negative_class_loss  # debugging shortcut only; the full loss is computed below

        class_loss = positive_class_loss + negative_class_loss
        total_loss = class_loss + (self.alpha * positive_local_loss)

        batch_mask = K.not_equal(num_positives_per_sample, 0)
        total_loss = tf.where(batch_mask, total_loss, K.zeros_like(total_loss))

        num_positives_per_sample = tf.where(
                batch_mask, num_positives_per_sample,
                K.ones_like(num_positives_per_sample))

        num_positives_per_sample = K.cast(num_positives_per_sample, 'float32')
        total_loss = total_loss / num_positives_per_sample
        return total_loss
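
Since compute_loss is a bound method, it can be handed to compile directly. A hedged wiring sketch; the class name MultiBoxLoss and its constructor arguments are assumptions, not taken from the snippet:

# Hypothetical setup; all names here are illustrative.
multibox_loss = MultiBoxLoss(neg_pos_ratio=3, alpha=1.0,
                             batch_size=32, background_id=0)
model.compile(optimizer='adam', loss=multibox_loss.compute_loss)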
Project: mcv-m5    Author: david-vazquez    | Project source | File source
def cce_flatt(void_class, weights_class):
    def categorical_crossentropy_flatt(y_true, y_pred):
        '''Expects y_true as a vector of integer class labels; it is flattened
        and converted to a one-hot matrix internally.
        '''
        if dim_ordering == 'th':
            y_pred = K.permute_dimensions(y_pred, (0, 2, 3, 1))
        shp_y_pred = K.shape(y_pred)
        y_pred = K.reshape(y_pred, (shp_y_pred[0]*shp_y_pred[1]*shp_y_pred[2],
                           shp_y_pred[3]))  # go back to b01,c
        # shp_y_true = K.shape(y_true)

        if dim_ordering == 'th':
            y_true = K.cast(K.flatten(y_true), 'int32')  # b,01 -> b01
        else:
            y_true = K.cast(K.flatten(y_true), 'int32')  # b,01 -> b01

        # remove void classes from cross_entropy
        if len(void_class):
            for i in range(len(void_class)):
                # get idx of non void classes and remove void classes
                # from y_true and y_pred
                idxs = K.not_equal(y_true, void_class[i])
                if dim_ordering == 'th':
                    idxs = idxs.nonzero()
                    y_pred = y_pred[idxs]
                    y_true = y_true[idxs]
                else:
                    y_pred = tf.boolean_mask(y_pred, idxs)
                    y_true = tf.boolean_mask(y_true, idxs)

        if dim_ordering == 'th':
            y_true = T.extra_ops.to_one_hot(y_true, nb_class=y_pred.shape[-1])
        else:
            y_true = tf.one_hot(y_true, K.shape(y_pred)[-1], on_value=1, off_value=0, axis=None, dtype=None, name=None)
            y_true = K.cast(y_true, 'float32')  # b,01 -> b01
        out = K.categorical_crossentropy(y_pred, y_true)

        # Class balancing
        if weights_class is not None:
            weights_class_var = K.variable(value=weights_class)
            class_balance_w = weights_class_var[y_true].astype(K.floatx())
            out = out * class_balance_w

        return K.mean(out)  # b01 -> b,01
    return categorical_crossentropy_flatt
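
A hedged usage sketch for semantic segmentation, assuming 'tf' dim ordering, a model output of shape (batch, h, w, classes), and integer-label targets; the void label value and the model name are illustrative:

# Hypothetical: treat label 11 as void, no class balancing.
loss_fn = cce_flatt(void_class=[11], weights_class=None)
model.compile(optimizer='rmsprop', loss=loss_fn)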
Project: keras_zoo    Author: david-vazquez    | Project source | File source
def cce_flatt(void_class, weights_class):
    def categorical_crossentropy_flatt(y_true, y_pred):
        '''Expects y_true as a vector of integer class labels; it is flattened
        and converted to a one-hot matrix internally.
        '''
        if dim_ordering == 'th':
            y_pred = K.permute_dimensions(y_pred, (0, 2, 3, 1))
        shp_y_pred = K.shape(y_pred)
        y_pred = K.reshape(y_pred, (shp_y_pred[0]*shp_y_pred[1]*shp_y_pred[2],
                           shp_y_pred[3]))  # go back to b01,c
        # shp_y_true = K.shape(y_true)

        if dim_ordering == 'th':
            y_true = K.cast(K.flatten(y_true), 'int32')  # b,01 -> b01
        else:
            y_true = K.cast(K.flatten(y_true), 'int32')  # b,01 -> b01

        # remove void classes from cross_entropy
        if len(void_class):
            for i in range(len(void_class)):
                # get idx of non void classes and remove void classes
                # from y_true and y_pred
                idxs = K.not_equal(y_true, void_class[i])
                if dim_ordering == 'th':
                    idxs = idxs.nonzero()
                    y_pred = y_pred[idxs]
                    y_true = y_true[idxs]
                else:
                    y_pred = tf.boolean_mask(y_pred, idxs)
                    y_true = tf.boolean_mask(y_true, idxs)

        if dim_ordering == 'th':
            y_true = T.extra_ops.to_one_hot(y_true, nb_class=y_pred.shape[-1])
        else:
            y_true = tf.one_hot(y_true, K.shape(y_pred)[-1], on_value=1, off_value=0, axis=None, dtype=None, name=None)
            y_true = K.cast(y_true, 'float32')  # b,01 -> b01
        out = K.categorical_crossentropy(y_pred, y_true)

        # Class balancing
        if weights_class is not None:
            weights_class_var = K.variable(value=weights_class)
            class_balance_w = weights_class_var[y_true].astype(K.floatx())
            out = out * class_balance_w

        return K.mean(out)  # b01 -> b,01
    return categorical_crossentropy_flatt