Python keras.backend module: max() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.backend.max().
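Before the project snippets, a minimal sketch of the call itself (assuming Keras 2.x with the TensorFlow backend; values are illustrative): keras.backend.max() reduces a tensor along the given axis (or over all axes when axis is None), and keepdims controls whether the reduced axis is kept with length 1.

import numpy as np
from keras import backend as K

x = K.variable(np.array([[1., 5., 2.],
                         [4., 3., 6.]]))
row_max = K.max(x, axis=-1, keepdims=True)   # per-row maximum, shape (2, 1)
print(K.eval(row_max))                       # [[5.], [6.]]
print(K.eval(K.max(x)))                      # global maximum: 6.0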

Project: segmentation_DLMI    Author: imatge-upc
def dice(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    y_sum = K.sum(y_true * y_pred_decision)

    return (2. * y_sum + K.epsilon()) / (K.sum(y_true) + K.sum(y_pred_decision) + K.epsilon())
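The K.max() call above provides the per-voxel maximum over the class axis (axis=4 for these 5-D volumes); dividing by it and applying tf.floor turns the soft prediction into a one-hot argmax decision, because only the winning channel reaches 1. A small standalone sketch of that trick on a 2-D toy tensor (illustrative values, assuming Keras 2.x with the TensorFlow backend):

import tensorflow as tf
from keras import backend as K

y_pred = K.constant([[0.2, 0.7, 0.1],
                     [0.5, 0.3, 0.2]])   # softmax outputs: 2 positions, 3 classes
decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=-1, keepdims=True))
print(K.eval(decision))
# [[0. 1. 0.]
#  [1. 0. 0.]]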
Project: mimic3-benchmarks    Author: YerevaNN
def on_epoch_end(self, epoch, logs={}):
        print "\n==>predicting on train"
        self.calc_metrics(self.train_data_gen, self.train_history, 'train', logs)
        print "\n==>predicting on validation"
        self.calc_metrics(self.val_data_gen, self.val_history, 'val', logs)

        if self.early_stopping:
            ihm_max_auc = np.max([x["val_ihm_auroc"] for x in self.val_history])
            ihm_cur_auc = self.val_history[-1]["val_ihm_auroc"]
            pheno_max_auc = np.max([x["val_pheno_ave_auc_macro"] for x in self.val_history])
            pheno_cur_auc = self.val_history[-1]["val_pheno_ave_auc_macro"]
            if (pheno_max_auc > 0.75 and pheno_cur_auc < 0.73) and (ihm_max_auc > 0.85 and ihm_cur_auc < 0.83):
                self.model.stop_training = True


# ===================== LAYERS ===================== #
Project: BiMPM_keras    Author: ijinmao
def _max_pooling_matching(self, h1, h2, w):
        """Max pooling matching operation.

        # Arguments
            h1: (batch_size, h1_timesteps, embedding_size)
            h2: (batch_size, h2_timesteps, embedding_size)
            w: weights of one direction, (mp_dim, embedding_size)

        # Output shape
            (batch_size, h1_timesteps, mp_dim)
        """
        # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
        h1 = self._time_distributed_multiply(h1, w)
        # h2 * weights, (batch_size, h2_timesteps, mp_dim, embedding_size)
        h2 = self._time_distributed_multiply(h2, w)
        # reshape h1 to (batch_size, h1_timesteps, 1, mp_dim, embedding_size)
        h1 = K.expand_dims(h1, axis=2)
        # reshape h2 to (batch_size, 1, h2_timesteps, mp_dim, embedding_size)
        h2 = K.expand_dims(h2, axis=1)
        # cosine similarity, (batch_size, h1_timesteps, h2_timesteps, mp_dim)
        cos = self._cosine_similarity(h1, h2)
        # (batch_size, h1_timesteps, mp_dim)
        matching = K.max(cos, axis=2)
        return matching
Project: BiMPM_keras    Author: ijinmao
def _max_attentive_matching(self, h1, h2, cosine_matrix, w):
        """Max attentive matching operation.

        # Arguments
            h1: (batch_size, h1_timesteps, embedding_size)
            h2: (batch_size, h2_timesteps, embedding_size)
            cosine_matrix: cosine similarity weights over h2 hidden states,
                          (batch_size, h1_timesteps, h2_timesteps)
            w: weights of one direction, (mp_dim, embedding_size)

        # Output shape
            (batch_size, h1_timesteps, mp_dim)
        """
        # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
        h1 = self._time_distributed_multiply(h1, w)
        # max attentive vector (batch_size, h1_timesteps, embedding_size)
        max_attentive_vec = self._max_attentive_vectors(h2, cosine_matrix)
        # max_attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)
        max_attentive_vec = self._time_distributed_multiply(max_attentive_vec, w)
        # matching vector, (batch_size, h1_timesteps, mp_dim)
        matching = self._cosine_similarity(h1, max_attentive_vec)
        return matching
Project: deeppavlov    Author: deepmipt
def masked_softmax(tensor, mask, expand=2, axis=1):
    """Masked soft-max using Lambda and merge-multiplication.

    Args:
        tensor: tensor containing scores
        mask: mask for the tensor, where 1 marks a valid value at that position and 0 marks void/padded positions
        expand: axis along which to repeat mask
        axis: axis along which to compute soft-max

    Returns:
        masked soft-max values
    """

    mask = tf.expand_dims(mask, axis=expand)
    exponentiate = Lambda(lambda x: K.exp(x - K.max(x, axis=axis, keepdims=True)))(tensor)
    masked = tf.multiply(exponentiate, mask)
    div = tf.expand_dims(tf.reduce_sum(masked, axis=axis), axis=axis)
    predicted = tf.divide(masked, div)
    return predicted
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu
def call(self, x,mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu
def call(self, x,mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu
def call(self, x,mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: deep-mil-for-whole-mammogram-classification    Author: wentaozhu
def call(self, x,mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: ppap    Author: unique-horn
def setup_output(self, x):
    """
    Setup output tensor

    """
    x_max = K.max(x, axis=1)

    x_max = K.flatten(x_max)
    z = K.dot(x_max, self.w_proj_to_z) #+ self.b_proj_to_z
    hidden = K.dot(z, self.weights[0]) + self.biases[0]
    hidden = K.reshape(hidden, shape=(self.input_channels,
                                      self.hidden_dim))

    output = K.dot(hidden, self.weights[1]) + self.biases[1]

    self.output = K.reshape(output, (self.num_filters, self.input_channels,
                                     *self.output_shape))
    return self.output
Project: kfs    Author: the-moliver
def call(self, inputs):
        stim = inputs[0]
        center = inputs[1]
        centers_x = self.XX[None, :, :, None] - center[:, 0, None, None, None] - self.centers[0][None, None, None, :]
        centers_y = self.YY[None, :, :, None] - center[:, 1, None, None, None] - self.centers[1][None, None, None, :]
        senv = self.stds[None, None, None, :]
        gauss = self.gauss_scale * (K.square(self.dx) / (2 * np.pi * K.square(senv) + K.epsilon()))*K.exp(-(K.square(centers_x) + K.square(centers_y))/(2.0 * K.square(senv)))
        # gauss = (1 / K.sqrt(2 * np.pi * K.square(senv) + K.epsilon()))*K.exp(-(K.square(centers_x) + K.square(centers_y))/(2.0 * K.square(senv)))
        # gauss /= K.max(gauss, axis=(1, 2), keepdims=True)
        gauss = K.reshape(gauss, self.kernel_shape)

        if K.backend() == 'theano':
            output = K.sum(stim[..., None] * K.pattern_broadcast(gauss, self.kernel_broadcast), axis=self.filter_axes, keepdims=False)
        else:
            output = K.sum(stim[..., None] * gauss, axis=self.filter_axes, keepdims=False)

        return output
Project: DeepMoji    Author: bfelbo
def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid issue with Tensorflow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result
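The 'max trick' in the comments works because softmax is invariant to subtracting a constant from each row of logits: shifting by K.max(logits, axis=-1, keepdims=True) makes the largest entry 0, so K.exp cannot overflow, while the normalized weights stay the same. A quick check (illustrative values, assuming Keras 2.x with the TensorFlow backend):

from keras import backend as K

logits = K.constant([[10., 1000., -5.]])
naive = K.exp(logits) / K.sum(K.exp(logits), axis=-1, keepdims=True)   # exp(1000.) overflows to inf
shifted = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
stable = shifted / K.sum(shifted, axis=-1, keepdims=True)
print(K.eval(naive))    # [[ 0. nan  0.]]
print(K.eval(stable))   # [[0. 1. 0.]] (to float32 precision)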
Project: Hotpot    Author: Liang-Qiu
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])

    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)

    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
Project: Hotpot    Author: Liang-Qiu
def prep_model(inputs, N, s0pad, s1pad, c):
    # Word-level projection before averaging
    inputs[0] = TimeDistributed(Dense(N, activation='relu'))(inputs[0])
    inputs[0] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[0])
    inputs[1] = TimeDistributed(Dense(N, activation='relu'))(inputs[1])
    inputs[1] = Lambda(lambda x: K.max(x, axis=1), output_shape=(N, ))(inputs[1])
    merged = concatenate([inputs[0], inputs[1]])

    # Deep
    for i in range(c['deep']):
        merged = Dense(c['nndim'], activation=c['nnact'])(merged)
        merged = Dropout(c['nndropout'])(merged)
        merged = BatchNormalization()(merged)

    is_duplicate = Dense(1, activation='sigmoid')(merged)
    return [is_duplicate], N
Project: AdaptationSeg    Author: YangZhang4065
def SP_pixelwise_loss(y_true,y_pred):
    y_true_label=y_true[:,:class_number,:,:]
    y_true_SP_weight=y_true[:,class_number:,:,:]

    y_pred=K.clip(y_pred,-50.,50.)#prevent overflow
    sample_num_per_class=K.sum(y_true_label,axis=[2,3],keepdims=True)
    class_ind=K.cast(K.greater(sample_num_per_class,0.),'float32')
    avg_sample_num_per_class=K.sum(sample_num_per_class,axis=1,keepdims=True)/K.sum(class_ind,axis=1,keepdims=True)
    sample_weight_per_class=avg_sample_num_per_class/(sample_num_per_class+0.1)
    exp_pred=K.exp(y_pred-K.max(y_pred,axis=1,keepdims=True))
    y_pred_softmax=exp_pred/K.sum(exp_pred,axis=1,keepdims=True)
    pixel_wise_loss=-K.log(y_pred_softmax)*y_true_label
    pixel_wise_loss=pixel_wise_loss*sample_weight_per_class
    weighter_pixel_wise_loss=K.sum(pixel_wise_loss,axis=1,keepdims=True)

    return K.mean(weighter_pixel_wise_loss*y_true_SP_weight)

#label distribution loss
Project: AdaptationSeg    Author: YangZhang4065
def layout_loss_hard(y_true,y_pred):

    y_pred=K.clip(y_pred,-50.,50.)#prevent overflow
    exp_pred=K.exp(y_pred-K.max(y_pred,axis=1,keepdims=True))
    y_pred_softmax=exp_pred/K.sum(exp_pred,axis=1,keepdims=True)

    max_pred_softmax=K.max(y_pred_softmax,axis=1,keepdims=True)
    bin_pred_softmax_a=y_pred_softmax/max_pred_softmax
    bin_pred_softmax=bin_pred_softmax_a**6.

    final_pred=K.mean(bin_pred_softmax,axis=[2,3])
    final_pred=final_pred/(K.sum(final_pred,axis=1,keepdims=True)+K.epsilon())
    y_true_s=K.squeeze(y_true,axis=3)
    y_true_s=K.squeeze(y_true_s,axis=2)
    tier_wise_loss_v=-K.clip(K.log(final_pred),-500,500)*y_true_s
    return K.mean(K.sum(tier_wise_loss_v,axis=1))


#compile
Project: rna_protein_binding    Author: wentaozhu
def custom_objective(y_true, y_pred):
    #prediction = Flatten(name='flatten')(dense_3)
    #prediction = ReRank(k=k, label=1, name='output')(prediction)
    #prediction = SoftReRank(softmink=softmink, softmaxk=softmaxk, label=1, name='output')(prediction)
    '''Just another crossentropy'''
    #y_true = K.clip(y_true, _EPSILON, 1.0-_EPSILON)
    y_true = K.max(y_true)
    #y_armax_index = numpy.argmax(y_pred)
    y_new = K.max(y_pred)
    #y_new = max(y_pred)
    '''
    if y_new >= 0.5:
        y_new_label = 1
    else:
        y_new_label = 0
    cce = abs(y_true - y_new_label)
    '''
    logEps=1e-8
    cce = - (y_true * K.log(y_new+logEps) + (1 - y_true)* K.log(1-y_new + logEps))
    return cce
Project: rna_protein_binding    Author: wentaozhu
def call(self, x,mask=None):
        import theano.tensor as T
        newx = T.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return newx
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: rna_protein_binding    Author: wentaozhu
def call(self, x,mask=None):
        newx = K.sort(x)
        #response = K.reverse(newx, axes=1)
        #response = K.sum(x> 0.5, axis=1) / self.k
        return K.concatenate([newx[:,:self.softmink], newx[:,newx.shape[1]-self.softmaxk:]], axis=-1)
        #response = K.reshape(newx,[-1,1])
        #return K.concatenate([1-response, response], axis=self.label)
        #response = K.reshape(x[:,self.axis], (-1,1))
        #return K.concatenate([1-response, response], axis=self.axis)
        #e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        #s = K.sum(e, axis=self.axis, keepdims=True)
        #return e / s
Project: MSgothicPolice    Author: ysdyt
def call(self, x):

        input_shape = K.shape(x)

        len_i, len_j = input_shape[1], input_shape[2]

        outputs = []

        for nb_bins in self.nb_bins_per_level:
            bin_size_i = K.cast(len_i, 'int32') // nb_bins
            bin_size_j = K.cast(len_j, 'int32') // nb_bins

            for i, j in product(range(nb_bins), range(nb_bins)):
                # each combination of i,j is a unique rectangle
                i1, i2 = bin_size_i * i, bin_size_i * (i + 1)
                j1, j2 = bin_size_j * j, bin_size_j * (j + 1)

                pooled_features = K.max(x[:, i1:i2, j1:j2, :], axis=(1, 2))

                outputs.append(pooled_features)

        return K.concatenate(outputs, axis=1)
Project: gymexperiments    Author: tambetm
def createLayers():
    x = Input(shape=env.observation_space.shape)
    if args.batch_norm:
        h = BatchNormalization()(x)
    else:
        h = x
    for i in xrange(args.layers):
        h = Dense(args.hidden_size, activation=args.activation)(h)
        if args.batch_norm and i != args.layers - 1:
            h = BatchNormalization()(h)
    y = Dense(env.action_space.n + 1)(h)
    if args.advantage == 'avg':
        z = Lambda(lambda a: K.expand_dims(a[:, 0], dim=-1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True), output_shape=(env.action_space.n,))(y)
    elif args.advantage == 'max':
        z = Lambda(lambda a: K.expand_dims(a[:, 0], dim=-1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True), output_shape=(env.action_space.n,))(y)
    elif args.advantage == 'naive':
        z = Lambda(lambda a: K.expand_dims(a[:, 0], dim=-1) + a[:, 1:], output_shape=(env.action_space.n,))(y)
    else:
        assert False

    return x, z
Project: DeepIV    Author: jhartford
def mix_gaussian_loss(x, mu, log_sig, w):
    '''
    Combine the mixture of gaussian distribution and the loss into a single function
    so that we can do the log sum exp trick for numerical stability...
    '''
    if K.backend() == "tensorflow":
        x.set_shape([None, 1])
    gauss = log_norm_pdf(K.repeat_elements(x=x, rep=mu.shape[1], axis=1), mu, log_sig)
    # TODO: get rid of clipping.
    gauss = K.clip(gauss, -40, 40)
    max_gauss = K.maximum((0.), K.max(gauss))
    # log sum exp trick...
    gauss = gauss - max_gauss
    out = K.sum(w * K.exp(gauss), axis=1)
    loss = K.mean(-K.log(out) + max_gauss)
    return loss
Project: ensemble-adv-training    Author: ftramer
def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """

    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(logits, y, from_logits=True)
    elif loss == 'logloss':
        out = K.categorical_crossentropy(logits, y, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))

    if mean:
        out = K.mean(out)
    else:
        out = K.sum(out)
    return out
Project: deep-learning-experiments    Author: raghakot
def call(self, x, mask=None):
        w = self.W
        w_rot = [w]
        for i in range(3):
            w = shift_rotate(w, shift=2)
            w_rot.append(w)

        outputs = tf.stack([K.conv2d(x, w_i, strides=self.subsample,
                                     border_mode=self.border_mode,
                                     dim_ordering=self.dim_ordering,
                                     filter_shape=self.W_shape) for w_i in w_rot])

        output = K.max(outputs, 0)

        if self.bias:
            if self.dim_ordering == 'th':
                output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
            elif self.dim_ordering == 'tf':
                output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
            else:
                raise ValueError('Invalid dim_ordering:', self.dim_ordering)
        output = self.activation(output)
        return output
Project: deep-learning-experiments    Author: raghakot
def call(self, x, mask=None):
        w = self.W
        w_rot = [w]
        for i in range(7):
            w = shift_rotate(w)
            w_rot.append(w)

        outputs = tf.stack([K.conv2d(x, w_i, strides=self.subsample,
                                     border_mode=self.border_mode,
                                     dim_ordering=self.dim_ordering,
                                     filter_shape=self.W_shape) for w_i in w_rot])

        output = K.max(outputs, 0)

        if self.bias:
            if self.dim_ordering == 'th':
                output += K.reshape(self.b, (1, self.nb_filter, 1, 1))
            elif self.dim_ordering == 'tf':
                output += K.reshape(self.b, (1, 1, 1, self.nb_filter))
            else:
                raise ValueError('Invalid dim_ordering:', self.dim_ordering)
        output = self.activation(output)
        return output
Project: mctest-model    Author: Maluuba
def get_output_shape_for(self, input_shape):
        if self.mode in ['max', 'mean', 'sum']:
            shape_list = list(input_shape)
            del shape_list[self.axis]
            return tuple(shape_list)
        elif self.mode in ['concat']:
            assert len(input_shape) >= 2
            stacked_dim = 0
            for inp_shape in input_shape[1:]:
                # Special treatment for 2D matrices
                if len(inp_shape) == 2:
                    stacked_dim += 1 if self.axis == 1 else inp_shape[-1]
                else:
                    stacked_dim += inp_shape[self.axis]
            return_shape = list(input_shape[0])
            return_shape[self.axis] += stacked_dim
            return tuple(return_shape)
        else:
            raise NotImplementedError
Project: mctest-model    Author: Maluuba
def call(self, x, mask=None):
        if self.mode == 'max':
            return K.max(x, axis=self.axis)
        elif self.mode == 'mean':
            return K.mean(x, axis=self.axis)
        elif self.mode == 'sum':
            return K.sum(x, axis=self.axis)
        elif self.mode == 'concat':
            assert len(x) >= 2
            assert x[0].ndim == 3

            def _transform(target):
                # Expand first dimension in any case
                target = K.expand_dims(target, dim=1)
                if self.axis == 2:
                    # Repeat target along the time dimension
                    target = K.repeat_elements(
                        target, x[0].shape[1], axis=1)
                return target

            targets = map(lambda t: _transform(t) if t.ndim == 2 else t, x[1:])
            return K.concatenate([x[0]] + targets, axis=self.axis)
        else:
            raise NotImplementedError
Project: mctest-model    Author: Maluuba
def call(self, x, mask=None):
        X = x[0]  # shape is n_b x n_s x 4 x n_w_s x n_w_a

        ax = 2 if self.is_q else 3
        # reduce over n_w_s
        output = T.max(X, axis=ax)
        output = T.switch(T.gt(output, self.threshold), output, 0)
        # reduce over n_w_a
        if self.wordbyword_merge_type == 'max':
            output = T.max(output, axis=ax)  # get max max_sim for each a  # n_b x n_s x 4
        elif self.wordbyword_merge_type == 'mask_average':
            output = masked_mean(output, axis=ax)  # get average max_sim for each a
        elif self.wordbyword_merge_type == 'weighted_average':
            weight_layer = x[1]
            output = weighted_average(output, weight_layer, axis=ax)
        output = output * self.alpha
        return output
Project: visual_turing_test-tutorial    Author: mateuszmalinowski
def time_distributed_nonzero_max_pooling(x):
    """
    Computes maximum along the first (time) dimension.
    It ignores the mask m.

    In:
        x - input; a 3D tensor
        mask_value - value to mask out; if None, no masking is applied
            (0.0 by default)
    """

    import theano.tensor as T

    mask_value=0.0
    x = T.switch(T.eq(x, mask_value), -numpy.inf, x)
    masked_max_x = x.max(axis=1)
    # replace infinities with mask_value
    masked_max_x = T.switch(T.eq(masked_max_x, -numpy.inf), 0, masked_max_x)
    return masked_max_x
Project: visual_turing_test-tutorial    Author: mateuszmalinowski
def time_distributed_masked_max(x, m):
    """
    Computes max along the first (time) dimension.

    In:
        x - input; a 3D tensor
        m - mask
        m_value - value for masking
    """
    # place infinities where mask is off
    m_value = 0.0
    tmp = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    x_with_inf = x + K.expand_dims(tmp)
    x_max = K.max(x_with_inf, axis=1) 
    r = K.switch(K.equal(x_max, -numpy.inf), m_value, x_max)
    return r 


## classes  ##

# Transforms existing layers to masked layers
Project: segmentation_DLMI    Author: imatge-upc
def dice_1(y):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true, y_pred = y
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :,1]
    mask_pred = y_pred_decision[:, :, :, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_2(y):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true, y_pred = y
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_3(y):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true, y_pred = y
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_coef(y_true, y_pred):

    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred_decision)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + K.epsilon()) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_core(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true1 = y_true[:, :, :, :, 3:]
    mask_true2 = y_true[:, :, :, :, 1:2]
    mask_true = K.sum(K.concatenate([mask_true1, mask_true2], axis=4), axis=4)
    mask_pred1 = y_pred_decision[:, :, :, :, 3:]
    mask_pred2 = y_pred_decision[:, :, :, :, 1:2]
    mask_pred = K.sum(K.concatenate([mask_pred1, mask_pred2], axis=4), axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_enhance(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())

# def accuracy_survival(y_true, y_predicted):
Project: segmentation_DLMI    Author: imatge-upc
def dice_whole_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    # mask = K.expand_dims(K.sum(y_true,axis=4),axis=4)
    # cmp_mask = K.concatenate([K.ones_like(mask) - mask,K.zeros_like(mask), K.zeros_like(mask)],axis=4)
    # y_pred = y_pred + cmp_mask

    y_true = y_true[:,:,:,:,:3]
    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = K.sum(y_true, axis=4)
    mask_pred = K.sum(y_pred_decision, axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_core_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true = y_true[:,:,:,:,:3]


    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    y_pred_decision = tf.where(tf.is_nan(y_pred_decision), tf.zeros_like(y_pred_decision), y_pred_decision)


    mask_true1 = K.expand_dims(y_true[:, :, :, :, 2],axis=4)
    mask_true2 = K.expand_dims(y_true[:, :, :, :, 0],axis=4)
    mask_true = K.sum(K.concatenate([mask_true1, mask_true2], axis=4), axis=4)
    mask_pred1 = K.expand_dims(y_pred_decision[:, :, :, :, 2],axis=4)
    mask_pred2 = K.expand_dims(y_pred_decision[:, :, :, :, 0],axis=4)
    mask_pred = K.sum(K.concatenate([mask_pred1, mask_pred2], axis=4), axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_enhance_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true = y_true[:,:,:,:,:3]

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    # y_pred_decision = tf.where(tf.is_nan(y_pred_decision), tf.zeros_like(y_pred_decision), y_pred_decision)



    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2] * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_1(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :,1]
    mask_pred = y_pred_decision[:, :, :, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_1_2D(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon())/ K.max(y_pred, axis=2, keepdims=True))

    mask_true = y_true[:, :, 1]
    mask_pred = y_pred_decision[:, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_2(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_3(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def dice_4(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 4]
    mask_pred = y_pred_decision[:, :, :, :, 4]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def precision_1(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 1]
    mask_pred = y_pred_decision[:, :, :, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def precision_2(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def precision_3(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def precision_4(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 4]
    mask_pred = y_pred_decision[:, :, :, :, 4]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def recall_0(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 0]
    mask_pred = y_pred_decision[:, :, :, :, 0]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc
def recall_2(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())