Python keras.backend module, argmax() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.argmax(). In the snippets below, K refers to keras.backend, tf to tensorflow, and np to numpy.
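Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the typical pattern: K.argmax reduces one-hot labels or softmax outputs to class indices, which can then be compared element-wise.

from keras import backend as K

y_true = K.constant([[0., 0., 1.], [0., 1., 0.]])         # one-hot labels
y_pred = K.constant([[0.1, 0.2, 0.7], [0.3, 0.4, 0.3]])   # softmax outputs

true_classes = K.argmax(y_true, axis=-1)                  # -> [2, 1]
pred_classes = K.argmax(y_pred, axis=-1)                  # -> [2, 1]
accuracy = K.mean(K.cast(K.equal(true_classes, pred_classes), K.floatx()))
print(K.eval(accuracy))                                   # 1.0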

Project: Keras-FCN    Author: theduynguyen
def mean_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
    y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

    # correctly classified
    clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
    equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

    correct_pixels_per_class = K.sum(equal_entries, axis=1)
    n_pixels_per_class = K.sum(y_true_reshaped,axis=1)

    acc = correct_pixels_per_class / n_pixels_per_class
    acc_mask = tf.is_finite(acc)
    acc_masked = tf.boolean_mask(acc,acc_mask)

    return K.mean(acc_masked)
Project: DeepLearning-OCR    Author: xingjian-f
def get_sample_weight(label, whole_set):
    if label.ndim < 3: # in case output_size==1
        return None
    ret = []
    for i in label:
        ret.append([])
        tag = False
        for j in i:
            cha = whole_set[np.argmax(j)]
            weight = 0
            if cha == 'empty' and tag == False:
                weight = 1 # TODO
                tag = True 
            if cha != 'empty':
                weight = 1
            ret[-1].append(weight)
    ret = np.asarray(ret)
    return ret
Project: semantic-tagging    Author: bjerva
def make_weight_matrix(X_train):
    '''
    Create sample weights
    '''
    X_weights = np.zeros_like(X_train, dtype=np.float32)
    for idx, sentence in enumerate(X_train):
        for idy, word in enumerate(sentence):
            curr_class = np.argmax(y_train[idx, idy])
            if curr_class == 0:
                X_weights[idx, idy] = 1#e-8
            elif curr_class <= 1:
                X_weights[idx, idy] = 1#e-4
            else:
                X_weights[idx, idy] = 1#0

    return X_weights
Project: Keras-FCN    Author: theduynguyen
def mean_IoU(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
    y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

    # correctly classified
    clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
    equal_entries = K.cast(K.equal(clf_pred,y_true_reshaped), dtype='float32') * y_true_reshaped

    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped,axis=1) + K.sum(y_pred_reshaped,axis=1)

    iou = intersection / (union_per_class - intersection)
    iou_mask = tf.is_finite(iou)
    iou_masked = tf.boolean_mask(iou,iou_mask)

    return K.mean( iou_masked )
Project: deep-mlsa    Author: spinningbytes
def f1_score_keras(y_true, y_pred):
    #convert probas to 0,1
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    #f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    #return average f1 score over all classes
    return K.mean(f1_class)
Project: deep-mlsa    Author: spinningbytes
def f1_score_taskB(y_true, y_pred):
    #convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    #f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    #return f1 score for each class
    return f1_class
Project: deep-mlsa    Author: spinningbytes
def f1_score_semeval(y_true, y_pred):
    # convert probas to 0,1
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    #return the average f1 score of classes 0 and 2
    return (f1_class[0] + f1_class[2])/2.0
Project: deep-mlsa    Author: spinningbytes
def precision_keras(y_true, y_pred):
    #convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #return average precision over all classes
    return K.mean(precision)
Project: deep-mlsa    Author: spinningbytes
def f1_score_task3(y_true, y_pred):
    #convert probas to 0,1
    y_ppred = K.zeros_like(y_true)
    y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class
    precision = K.T.switch(K.T.eq(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #recall for each class
    recall = K.T.switch(K.T.eq(gold_cnt, 0), 0, y_true_pred/gold_cnt)

    #f1 for each class
    f1_class = K.T.switch(K.T.eq(precision + recall, 0), 0, 2*(precision*recall)/(precision+recall))

    #return f1 score of class 1
    return f1_class[1]
Project: deep-mlsa    Author: spinningbytes
def f1_score_taskB(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    # return f1 score for each class
    return f1_class
Project: deep-mlsa    Author: spinningbytes
def precision_keras(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # return average precision over all classes
    return K.mean(precision)
Project: deep-mlsa    Author: spinningbytes
def precision_keras(y_true, y_pred):
    #convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred/pred_cnt)

    #return average precision over all classes
    return K.mean(precision)
Project: neural-segmentation    Author: melsner
def plotVAEpyplot(self, logdir, prefix, ctable=None, reverseUtt=False, batch_size=128, debug=False):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        ticks = [[-1,-0.5,0,0.5,1]]*self.latentDim
        samplePoints = np.array(np.meshgrid(*ticks)).T.reshape(-1,3)
        input_placeholder = np.ones(tuple([len(samplePoints)] + list(self.phon.output_shape[1:-1]) + [1]))
        preds = self.decode_word([samplePoints, input_placeholder], batch_size=batch_size)
        if reverseUtt:
            preds = getYae(preds, reverseUtt)
        reconstructed = reconstructXae(np.expand_dims(preds.argmax(-1), -1), ctable, maxLen=5)
        for i in range(len(samplePoints)):
            ax.text(samplePoints[i,0], samplePoints[i,1], samplePoints[i,2], reconstructed[i])
        ax.set_xlim3d(-1, 1)
        ax.set_ylim3d(-1, 1)
        ax.set_zlim3d(-1, 1)
        pickle.dump(fig, file(logdir + '/' + prefix + '_VAEplot.3D.obj', 'wb'))

        plt.close(fig)
Project: neural-segmentation    Author: melsner
def plotVAEplotly(self, logdir, prefix, ctable=None, reverseUtt=False, batch_size=128, debug=False):
        ticks = [[-1,-0.5,0,0.5,1]]*self.latentDim
        samplePoints = np.array(np.meshgrid(*ticks)).T.reshape(-1,3)
        input_placeholder = np.ones(tuple([len(samplePoints)] + list(self.phon.output_shape[1:-1]) + [1]))
        preds = self.decode_word([samplePoints, input_placeholder], batch_size=batch_size)
        if reverseUtt:
            preds = getYae(preds, reverseUtt)
        reconstructed = reconstructXae(np.expand_dims(preds.argmax(-1), -1), ctable, maxLen=5)

        data = [go.Scatter3d(
            x = samplePoints[:,0],
            y = samplePoints[:,1],
            z = samplePoints[:,2],
            text = reconstructed,
            mode='text'
        )]
        layout = go.Layout()
        fig = go.Figure(data=data, layout=layout)
        plotly.offline.plot(fig, filename=logdir + '/' + prefix + '_VAEplot.html', auto_open=False)
Project: tying-wv-and-wc    Author: icoxfog417
def augmented_loss(self, y_true, y_pred):
        _y_pred = Activation("softmax")(y_pred)
        loss = K.categorical_crossentropy(_y_pred, y_true)

        # y is (batch x seq x vocab)
        y_indexes = K.argmax(y_true, axis=2)  # turn one hot to index. (batch x seq)
        y_vectors = self.embedding(y_indexes)  # lookup the vector (batch x seq x vector_length)

        #v_length = self.setting.vector_length
        #y_vectors = K.reshape(y_vectors, (-1, v_length))
        #y_t = K.map_fn(lambda v: K.dot(self.embedding.embeddings, K.reshape(v, (-1, 1))), y_vectors)
        #y_t = K.squeeze(y_t, axis=2)  # unknown but necessary operation
        #y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))

        # vector x embedding dot products (batch x seq x vocab)
        y_t = tf.tensordot(y_vectors, K.transpose(self.embedding.embeddings), 1)
        y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))  # explicitly set shape
        y_t = K.softmax(y_t / self.temperature)
        _y_pred_t = Activation("softmax")(y_pred / self.temperature)
        aug_loss = kullback_leibler_divergence(y_t, _y_pred_t)
        loss += (self.gamma * self.temperature) * aug_loss
        return loss
Project: keras-fcn    Author: JihongJu
def compute_error_matrix(y_true, y_pred):
    """Compute Confusion matrix (a.k.a. error matrix).

    a       predicted
    c       0   1   2
    t  0 [[ 5,  3,  0],
    u  1  [ 2,  3,  1],
    a  2  [ 0,  2, 11]]
    l

    Note: true positives are on the diagonal
    """
    # Find channel axis given backend
    if K.image_data_format() == 'channels_last':
        ax_chn = 3
    else:
        ax_chn = 1
    classes = y_true.shape[ax_chn]
    confusion = get_confusion(K.argmax(y_true, axis=ax_chn).flatten(),
                              K.argmax(y_pred, axis=ax_chn).flatten(),
                              classes)
    return confusion
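The get_confusion helper used above is not part of this excerpt; as a rough sketch of what such a helper typically does (an assumption, not the project's actual implementation), the error matrix can be built from flat integer index arrays with numpy:

import numpy as np

def get_confusion_sketch(y_true_idx, y_pred_idx, num_classes):
    # Rows are actual classes, columns are predicted classes.
    flat = y_true_idx * num_classes + y_pred_idx
    return np.bincount(flat, minlength=num_classes ** 2).reshape(num_classes, num_classes)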
Project: segmentation_DLMI    Author: imatge-upc
def accuracy(y_true, y_pred):
    return K.mean(K.equal(K.argmax(y_true, axis=4), K.argmax(y_pred, axis=4)))
Project: segmentation_DLMI    Author: imatge-upc
def accuracy_mask(mask):
    def accuracy(y_true, y_pred):
        intersection = K.equal(K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1))
        intersection_masked = mask * intersection
        return 1.0 * K.sum(intersection_masked) / K.sum(mask)

    return accuracy
Project: SNLI-Keras    Author: adamzjk
def precision(y_true, y_pred):
  y_true, y_pred = K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)
  y_true, y_pred = K.cast(y_true, 'float32'), K.cast(y_pred, 'float32')
  TP = K.sum(K.clip(y_true * y_pred, 0, 1)) # how many
  predicted_positives = K.sum(K.clip(y_pred, 0, 1))
  return TP / (predicted_positives + K.epsilon())
Project: SNLI-Keras    Author: adamzjk
def recall(y_true, y_pred):
  y_true, y_pred = K.argmax(y_true, axis=1), K.argmax(y_pred, axis=1)
  y_true, y_pred = K.cast(y_true, 'float32'), K.cast(y_pred, 'float32')
  TP = K.sum(K.clip(y_true * y_pred, 0, 1))  # how many
  # TP = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
  # possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
  possible_positives = K.sum(K.clip(y_true, 0, 1))
  return TP / (possible_positives + K.epsilon())
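The precision and recall functions above follow the Keras custom-metric signature (y_true, y_pred) and return a tensor, so they can be passed straight to compile(). A usage sketch, assuming an already-built categorical classifier named model (not part of the project code):

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy', precision, recall])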
Project: BiMPM_keras    Author: ijinmao
def _max_attentive_vectors(self, x2, cosine_matrix):
        """Max attentive vectors.

        Calculate max attentive vector for the entire sentence by picking
        the contextual embedding with the highest cosine similarity
        as the attentive vector.

        # Arguments
            x2: sequence vectors, (batch_size, x2_timesteps, embedding_size)
            cosine_matrix: cosine similarities matrix of x1 and x2,
                           (batch_size, x1_timesteps, x2_timesteps)

        # Output shape
            (batch_size, x1_timesteps, embedding_size)
        """
        # (batch_size, x1_timesteps)
        max_x2_step = K.argmax(cosine_matrix, axis=-1)

        embedding_size = K.int_shape(x2)[-1]
        timesteps = K.int_shape(max_x2_step)[-1]
        if timesteps is None:
            timesteps = K.shape(max_x2_step)[-1]

        # collapse time dimension and batch dimension together
        # collapse x2 to (batch_size * x2_timestep, embedding_size)
        x2 = K.reshape(x2, (-1, embedding_size))
        # collapse max_x2_step to (batch_size * x1_timesteps)
        max_x2_step = K.reshape(max_x2_step, (-1,))
        # (batch_size * x1_timesteps, embedding_size)
        max_x2 = K.gather(x2, max_x2_step)
        # reshape max_x2, (batch_size, x1_timesteps, embedding_size)
        attentive_vector = K.reshape(max_x2, K.stack([-1, timesteps, embedding_size]))
        return attentive_vector
Project: deeppavlov    Author: deepmipt
def answer_end_pred(context_encoding, question_attention_vector, context_mask, answer_start_distribution, W, dropout_rate):
    """Answer end prediction layer."""

    # Answer end prediction depends on the start prediction
    def s_answer_feature(x):
        maxind = K.argmax(
            x,
            axis=1,
        )
        return maxind

    x = Lambda(lambda x: K.tf.cast(s_answer_feature(x), dtype=K.tf.int32))(answer_start_distribution)
    start_feature = Lambda(lambda arg: K.tf.gather_nd(arg[0], K.tf.stack(
        [tf.range(K.tf.shape(arg[1])[0]), tf.cast(arg[1], K.tf.int32)], axis=1)))([context_encoding, x])

    start_feature = Lambda(lambda q: repeat_vector(q[0], q[1]))([start_feature, context_encoding])

    # Answer end prediction
    answer_end = Lambda(lambda arg: concatenate([
        arg[0],
        arg[1],
        arg[2],
        multiply([arg[0], arg[1]]),
        multiply([arg[0], arg[2]])
    ]))([context_encoding, question_attention_vector, start_feature])

    answer_end = TimeDistributed(Dense(W, activation='relu'))(answer_end)
    answer_end = Dropout(rate=dropout_rate)(answer_end)
    answer_end = TimeDistributed(Dense(1))(answer_end)

    # apply masking
    answer_end = Lambda(lambda q: masked_softmax(q[0], q[1]))([answer_end, context_mask])
    answer_end = Lambda(lambda q: flatten(q))(answer_end)
    return answer_end
Project: DeepLearning-OCR    Author: xingjian-f
def one_hot_decoder(data, whole_set):
    ret = []
    if data.ndim == 1: # keras bug ?
        data = np.expand_dims(data, 0)
    for probs in data:
        idx = np.argmax(probs)
        # print idx, whole_set[idx], probs[idx]
        ret.append(whole_set[idx])
    return ret
Project: DeepLearning-OCR    Author: xingjian-f
def top_one_prob(data):
    ret = []
    if data.ndim == 1: # keras bug ?
        data = np.expand_dims(data, 0)
    for probs in data:
        idx = np.argmax(probs)
        ret.append(probs[idx])
    return ret
Project: DeepLearning-OCR    Author: xingjian-f
def categorical_accuracy_per_sequence(y_true, y_pred):
    return K.mean(K.min(K.equal(K.argmax(y_true, axis=-1),
                  K.argmax(y_pred, axis=-1)), axis=-1))
Project: semantic-tagging    Author: bjerva
def calculate_accuracy(model, y, classes):
    '''
    TODO: Document
    '''
    if args.aux:
        classes = classes[0]
        y = y[0]

    sent_tags = []
    corr, err = 0, 0
    for idx, sentence in enumerate(y):
        sent_tags.append([])
        for idy, word in enumerate(sentence):
            gold_tag = np.argmax(word)
            if gold_tag <= 1:
                continue

            pred_tag = np.argmax(classes[idx, idy])
            if pred_tag == gold_tag:
                corr += 1
            else:
                err += 1

            indices = [idx, idy]

            sent_tags[-1].append((indices, gold_tag, pred_tag))

    print('Corr: {0}, Err: {1}'.format(corr, err))
    accuracy = corr / float(corr+err)
    print('Accuracy without dummy labels', accuracy)

    return classes, accuracy, sent_tags
Project: semantic-tagging    Author: bjerva
def actual_accuracy(act, pred):
    '''
    Calculate accuracy for each batch.
    Keras' standard calculation factors in our padding classes. We don't.
    FIXME: Not always working
    '''
    act_argm  = K.argmax(act, axis=-1)   # Indices of act. classes
    pred_argm = K.argmax(pred, axis=-1)  # Indices of pred. classes

    incorrect = K.cast(K.not_equal(act_argm, pred_argm), dtype='float32')
    correct   = K.cast(K.equal(act_argm, pred_argm), dtype='float32')
    padding   = K.cast(K.equal(K.sum(act), 0), dtype='float32')
    start     = K.cast(K.equal(act_argm, 0), dtype='float32')
    end       = K.cast(K.equal(act_argm, 1), dtype='float32')

    pad_start     = K.maximum(padding, start)
    pad_start_end = K.maximum(pad_start, end) # 1 where pad, start or end

    # Subtract pad_start_end from correct, then check equality to 1
    # E.g.: act: [pad, pad, pad, <s>, tag, tag, tag, </s>]
    #      pred: [pad, tag, pad, <s>, tag, tag, err, </s>]
    #   correct: [1,     0,   1,   1,   1,   1,   0,    1]
    #     p_s_e: [1,     1,   1,   1,   0,   0,   0,    1]
    #  corr-pse: [0,    -1,   0,   0,   1,   1,   0,    0] # Subtraction
    # actu_corr: [0,     0,   0,   0,   1,   1,   0,    0] # Check equality to 1
    corr_preds   = K.sum(K.cast(K.equal(correct - pad_start_end, 1), dtype='float32'))
    incorr_preds = K.sum(K.cast(K.equal(incorrect - pad_start_end, 1), dtype='float32'))
    total = corr_preds + incorr_preds
    accuracy = corr_preds / total

    return accuracy
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab
def chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Variant of sparse_chain_crf_loss but with one-hot encoded tags y.'''
    y_sparse = K.argmax(y, -1)
    y_sparse = K.cast(y_sparse, 'int32')
    return sparse_chain_crf_loss(y_sparse, x, U, b_start, b_end, mask)
Project: emnlp2017-bilstm-cnn-crf    Author: UKPLab
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    x = add_boundary_energy(x, b_start, b_end, mask)

    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    y = _backward(gamma, mask)
    return y
Project: SGAITagger    Author: zhiweiuu
def chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    '''Variant of sparse_chain_crf_loss but with one-hot encoded tags y.'''
    y_sparse = K.argmax(y, -1)
    y_sparse = K.cast(y_sparse, 'int32')
    return sparse_chain_crf_loss(y_sparse, x, U, b_start, b_end, mask)
Project: SGAITagger    Author: zhiweiuu
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    '''Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy.'''
    x = add_boundary_energy(x, b_start, b_end, mask)

    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    y = _backward(gamma, mask)
    return y
Project: Keras-FCN    Author: theduynguyen
def pixel_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape( y_true, tf.stack( [-1, s[1]*s[2], s[-1]] ) )
    y_pred_reshaped = K.reshape( y_pred, tf.stack( [-1, s[1]*s[2], s[-1]] ) )

    # correctly classified
    clf_pred = K.one_hot( K.argmax(y_pred_reshaped), nb_classes = s[-1])
    correct_pixels_per_class = K.cast( K.equal(clf_pred,y_true_reshaped), dtype='float32')

    return K.sum(correct_pixels_per_class) / K.cast(K.prod(s), dtype='float32')
Project: Neural-Style-Transfer-Windows    Author: titu1994
def find_patch_matches(a, a_norm, b):
    '''For each patch in A, find the best matching patch in B'''
    # we want cross-correlation here so flip the kernels
    convs = K.conv2d(a, b[:, :, ::-1, ::-1], border_mode='valid')
    argmax = K.argmax(convs / a_norm, axis=1)
    return argmax
Project: wavenet    Author: basveeling
def categorical_mean_squared_error(y_true, y_pred):
    """MSE for categorical variables."""
    return K.mean(K.square(K.argmax(y_true, axis=-1) -
                           K.argmax(y_pred, axis=-1)))
Project: deepcpg    Author: cangermueller
def cat_acc(y, z):
    """Compute categorical accuracy given one-hot matrices."""
    weights = _cat_sample_weights(y)
    _acc = K.cast(K.equal(K.argmax(y, axis=-1),
                          K.argmax(z, axis=-1)),
                  K.floatx())
    _acc = K.sum(_acc * weights) / K.sum(weights)
    return _acc
Project: indoor_localization    Author: kyeongsoo
def building_accuracy(y_true, y_pred):
        idx_true = K.argmax(y_true, axis=-1)
        idx_pred = K.argmax(y_pred, axis=-1)
        bld_true = tf.map_fn(bld_idx, idx_true)
        bld_pred = tf.map_fn(bld_idx, idx_pred)
        return K.cast(K.equal(bld_true, bld_pred), K.floatx())
Project: indoor_localization    Author: kyeongsoo
def floor_accuracy(y_true, y_pred):
        idx_true = K.argmax(y_true, axis=-1)
        idx_pred = K.argmax(y_pred, axis=-1)
        flr_true = tf.map_fn(flr_idx, idx_true)
        flr_pred = tf.map_fn(flr_idx, idx_pred)
        return K.cast(K.equal(flr_true, flr_pred), K.floatx())

    # append a classifier to the model
Project: indoor_localization    Author: kyeongsoo
def building_accuracy(y_true, y_pred):
        idx_true = K.argmax(y_true, axis=-1)
        idx_pred = K.argmax(y_pred, axis=-1)
        bld_true = tf.map_fn(bld_idx, idx_true)
        bld_pred = tf.map_fn(bld_idx, idx_pred)
        return K.cast(K.equal(bld_true, bld_pred), K.floatx())
Project: indoor_localization    Author: kyeongsoo
def floor_accuracy(y_true, y_pred):
        idx_true = K.argmax(y_true, axis=-1)
        idx_pred = K.argmax(y_pred, axis=-1)
        flr_true = tf.map_fn(flr_idx, idx_true)
        flr_pred = tf.map_fn(flr_idx, idx_pred)
        return K.cast(K.equal(flr_true, flr_pred), K.floatx())

    # append a classifier to the model
Project: image-analogies    Author: awentzonline
def find_patch_matches(a, a_norm, b):
    '''For each patch in A, find the best matching patch in B'''
    convs = None
    if K.backend() == 'theano':
        # HACK: This was not being performed on the GPU for some reason.
        from theano.sandbox.cuda import dnn
        if dnn.dnn_available():
            convs = dnn.dnn_conv(
                img=a, kerns=b[:, :, ::-1, ::-1], border_mode='valid')
    if convs is None:
        convs = K.conv2d(a, b[:, :, ::-1, ::-1], border_mode='valid')
    argmax = K.argmax(convs / a_norm, axis=1)
    return argmax
Project: deep-mlsa    Author: spinningbytes
def f1_score_semeval(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return the average f1 score of classes 0 and 2
    return (f1_class[0] + f1_class[2]) / 2.0
Project: deep-mlsa    Author: spinningbytes
def f1_score_task3(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'),
                              dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return f1 score of class 1
    return f1_class[1]
Project: deep-mlsa    Author: spinningbytes
def f1_score_keras(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true)[0], dtype='int64'), dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
                          name='precision_f1_semeval')

    # recall for each class
    recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
                       name='racall_f1_semeval')

    # f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
                         2 * (precision * recall) / (precision + recall), name='precision_f1_semeval')

    # return average f1 score over all classes
    return K.mean(f1_class)
Project: deep-mlsa    Author: spinningbytes
def f1_score_semeval(y_true, y_pred):
    #convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    #y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    #indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
    indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indicec_x_shape')[0], dtype='int64'), dim=-1)
    indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
    indices = K.concatenate((indices_x, indices_y))
    values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
    shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
    delta = tf.SparseTensor(indices, values, shape)

    y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)

    #where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true*y_pred_ones, axis=0)

    #for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    #for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    #precision for each class
    precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred/pred_cnt, name='precision_f1_semeval')

    #recall for each class
    recall = tf.select(K.equal(gold_cnt, 0),  K.zeros_like(y_true_pred),  y_true_pred/gold_cnt, name='racall_f1_semeval')

    #f1 for each class
    f1_class = tf.select(K.equal(precision + recall, 0),  K.zeros_like(y_true_pred),  2*(precision*recall)/(precision+recall), name='precision_f1_semeval')

    #return the average f1 score of classes 0 and 2
    return (f1_class[0] + f1_class[2])/2.0
Project: deep-mlsa    Author: spinningbytes
def f1_score_task3(y_true, y_pred):
    # convert probas to 0,1
    y_pred_ones = K.zeros_like(y_true)
    # y_pred_ones = K.T.set_subtensor(y_ppred[K.T.arange(y_true.shape[0]), K.argmax(y_pred, axis=-1)], 1)
    indices_x = K.arange(y_true.shape[0])
    indices_y = K.argmax(y_pred, axis=-1)
    indices = K.concatenate(indices_x, indices_y)
    values = K.ones_like(indices_x)
    shape = y_pred_ones.shape
    delta = tf.SparseTensor(indices, values, shape)
    y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1

    # where y_true=1 and y_pred=1 -> true positive
    y_true_pred = K.sum(y_true * y_pred_ones, axis=0)

    # for each class: how many were classified as said class
    pred_cnt = K.sum(y_pred_ones, axis=0)

    # for each class: how many are true members of said class
    gold_cnt = K.sum(y_true, axis=0)

    # precision for each class
    precision = K.switch(K.equal(pred_cnt, 0), 0, y_true_pred / pred_cnt)

    # recall for each class
    recall = K.switch(K.equal(gold_cnt, 0), 0, y_true_pred / gold_cnt)

    # f1 for each class
    f1_class = K.switch(K.equal(precision + recall, 0), 0, 2 * (precision * recall) / (precision + recall))

    # return f1 score of class 1
    return f1_class[1]
Project: anago    Author: Hironsan
def chain_crf_loss(y, x, U, b_start=None, b_end=None, mask=None):
    """Variant of sparse_chain_crf_loss but with one-hot encoded tags y."""
    y_sparse = K.argmax(y, -1)
    y_sparse = K.cast(y_sparse, 'int32')
    return sparse_chain_crf_loss(y_sparse, x, U, b_start, b_end, mask)
Project: anago    Author: Hironsan
def viterbi_decode(x, U, b_start=None, b_end=None, mask=None):
    """Computes the best tag sequence y for a given input x, i.e. the one that
    maximizes the value of path_energy."""
    x = add_boundary_energy(x, b_start, b_end, mask)

    alpha_0 = x[:, 0, :]
    gamma_0 = K.zeros_like(alpha_0)
    initial_states = [gamma_0, alpha_0]
    _, gamma = _forward(x,
                        lambda B: [K.cast(K.argmax(B, axis=1), K.floatx()), K.max(B, axis=1)],
                        initial_states,
                        U,
                        mask)
    y = _backward(gamma, mask)
    return y
Project: YAD2K    Author: allanzelener
def yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""
    box_scores = box_confidence * box_class_probs
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    prediction_mask = box_class_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    boxes = tf.boolean_mask(boxes, prediction_mask)
    scores = tf.boolean_mask(box_class_scores, prediction_mask)
    classes = tf.boolean_mask(box_classes, prediction_mask)
    return boxes, scores, classes
Project: neural-segmentation    Author: melsner
def masked_categorical_accuracy(y_true, y_pred):
    mask = K.cast(K.expand_dims(K.greater(K.argmax(y_true, axis=-1), 0), axis=-1), 'float32')
    accuracy = K.cast(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)), 'float32')
    accuracy *= K.squeeze(mask, -1)
    ## Normalize by number of real segments, using a small non-zero denominator in cases of padding characters
    ## in order to avoid division by zero
    #accuracy /= (K.mean(mask) + (1e-10*(1-K.mean(mask))))
    return accuracy
Project: deepascii    Author: awentzonline
def call(self, x, mask=None):
        return K.argmax(x, axis=self.axis)
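The snippet above is the call method of a small custom layer that returns K.argmax along a configurable axis. For one-off use inside a model, the same effect is often achieved with a Lambda layer (a sketch, not taken from the project):

from keras.layers import Lambda
from keras import backend as K

# Emits integer class indices instead of probabilities inside the model graph.
argmax_layer = Lambda(lambda t: K.argmax(t, axis=-1), name='argmax')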