Python keras.backend module, pool2d() code examples

The following 8 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.pool2d().
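
For reference, here is a minimal sketch of calling keras.backend.pool2d() directly, assuming the Keras 2 backend signature pool2d(x, pool_size, strides, padding, data_format, pool_mode); older releases spelled the last arguments border_mode and dim_ordering, which is why some of the snippets below differ slightly:

import numpy as np
from keras import backend as K

# a 1x8x8x1 input in channels_last layout (batch, rows, cols, channels)
x = K.constant(np.arange(64, dtype="float32").reshape(1, 8, 8, 1))

# 2x2 average pooling with stride 2; pool_mode may also be 'max'
y = K.pool2d(x, pool_size=(2, 2), strides=(2, 2),
             padding='valid', data_format='channels_last', pool_mode='avg')

print(K.int_shape(y))  # (1, 4, 4, 1)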

Project: Deep-Learning-with-Keras    Author: PacktPublishing    | project source | file source
def call(self, x, mask=None):
        # local response normalization: divide each activation by a power of
        # the spatially averaged squared activations, summed across feature maps
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        half_n = self.n // 2
        squared = K.square(x)
        pooled = K.pool2d(squared, (half_n, half_n), strides=(1, 1),
                          padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=1)
        else:
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom
Project: head-segmentation    Author: szywind    | project source | file source
def weightedLoss(y_true, y_pred):
    # compute weights
    # a = cv2.blur(y_true, (11,11))
    # ind = (a > 0.01) * (a < 0.99)
    # ind = ind.astype(np.float32)
    # weights = np.ones(a.shape)
    a = K.pool2d(y_true, (11,11), strides=(1, 1), padding='same', data_format=None, pool_mode='avg')
    ind = K.cast(K.greater(a, 0.01), dtype='float32') * K.cast(K.less(a, 0.99), dtype='float32')

    # every pixel gets a base weight of 1; border pixels get 1 + 2 = 3
    weights = K.cast(K.greater_equal(a, 0), dtype='float32')
    w0 = K.sum(weights)
    # w0 = weights.sum()
    weights = weights + ind * 2
    w1 = K.sum(weights)
    # w1 = weights.sum()
    # rescale so that the total weight equals the unweighted pixel count
    weights = weights / w1 * w0
    return weightedBCELoss2d(y_true, y_pred, weights) + weightedSoftDiceLoss(y_true, y_pred, weights)
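
The weighting above relies on a pool2d trick: average-pooling a 0/1 mask with stride 1 and 'same' padding produces values strictly between 0 and 1 only where the window straddles a mask boundary, so thresholding picks out a border band. A small standalone sketch of that idea (the toy mask and the 3x3 window are assumptions, not values from the project):

import numpy as np
from keras import backend as K

# hypothetical toy mask: a 1x8x8x1 tensor containing a filled 4x4 square
mask = np.zeros((1, 8, 8, 1), dtype="float32")
mask[0, 2:6, 2:6, 0] = 1.0

avg = K.pool2d(K.constant(mask), pool_size=(3, 3), strides=(1, 1),
               padding='same', pool_mode='avg')
border = (K.cast(K.greater(avg, 0.01), 'float32') *
          K.cast(K.less(avg, 0.99), 'float32'))

# pixels well inside or well outside the square give 0;
# the ring of pixels around the square's edge gives 1
print(K.eval(border)[0, :, :, 0])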
Project: deeplearning_keras    Author: gazzola    | project source | file source
def call(self, x, mask=None):
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        half_n = self.n // 2
        squared = K.square(x)
        pooled = K.pool2d(squared, (half_n, half_n), strides=(1, 1),
                          padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=1)
        else:
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = (self.alpha / self.n) * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom
Project: Kaggle-Carvana-Image-Masking-Challenge    Author: petrosgk    | project source | file source
def weighted_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # to keep the output the same size as the input, the kernel size must be an odd number
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 512:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 1024:
        kernel_size = 41
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(
        y_true, pool_size=(kernel_size, kernel_size), strides=(1, 1), padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = 1 - weighted_dice_coeff(y_true, y_pred, weight)
    return loss
Project: Kaggle-Carvana-Image-Masking-Challenge    Author: petrosgk    | project source | file source
def weighted_bce_dice_loss(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # to keep the output the same size as the input, the kernel size must be an odd number
    if K.int_shape(y_pred)[1] == 128:
        kernel_size = 11
    elif K.int_shape(y_pred)[1] == 256:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 512:
        kernel_size = 21
    elif K.int_shape(y_pred)[1] == 1024:
        kernel_size = 41
    else:
        raise ValueError('Unexpected image size')
    averaged_mask = K.pool2d(
        y_true, pool_size=(kernel_size, kernel_size), strides=(1, 1), padding='same', pool_mode='avg')
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2
    w1 = K.sum(weight)
    weight *= (w0 / w1)
    loss = weighted_bce_loss(y_true, y_pred, weight) + (1 - weighted_dice_coeff(y_true, y_pred, weight))
    return loss
Project: pepnet    Author: hammerlab    | project source | file source
def compute_mask(self, inputs, mask=None):
        """Computes an output mask tensor.

        # Arguments
            inputs: Tensor or list of tensors.
            mask: Tensor or list of tensors.

        # Returns
            None or a tensor (or list of tensors,
                one per output tensor of the layer).
        """
        if mask is None:
            return None
        # dimensions of mask should be (batch_size, time_steps)
        assert K.ndim(mask) == 2
        # add a dummy dimension so that the shape is now
        # (batch_size, time_steps, 1)
        mask = K.expand_dims(mask, 2)
        # now add a fake 2nd spatial dimension
        # (batch_size, time_steps, 1, 1)
        mask = K.expand_dims(mask, 3)
        strides = self.strides + (1,)
        pool_size = self.pool_size + (1,)
        mask = K.pool2d(
            mask,
            pool_size=pool_size,
            strides=strides,
            padding=self.padding,
            data_format="channels_last",
            pool_mode='max')
        # get rid of dummy dimensions
        mask = K.squeeze(mask, 3)
        mask = K.squeeze(mask, 2)
        return mask
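
A toy check of how this mask propagation behaves, assuming a (batch, time) mask and a pool window of 2 along the time axis (the values below are made up for illustration):

import numpy as np
from keras import backend as K

# a (batch, time) mask where the last three of six timesteps are padding
mask = K.constant(np.array([[1, 1, 1, 0, 0, 0]], dtype="float32"))

m = K.expand_dims(mask, 2)          # (batch, time, 1)
m = K.expand_dims(m, 3)             # (batch, time, 1, 1)
m = K.pool2d(m, pool_size=(2, 1), strides=(2, 1),
             padding='valid', data_format='channels_last', pool_mode='max')
m = K.squeeze(K.squeeze(m, 3), 2)   # back to (batch, pooled_time)

# a pooled timestep stays unmasked if any timestep in its window was unmasked
print(K.eval(m))  # [[1. 1. 0.]]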
Project: Ultras-Sound-Nerve-Segmentation---Kaggle    Author: Simoncarbo    | project source | file source
def call(self, x, mask=None):
        stride_row, stride_col = self.subsample
        nb_filter,_,_,_ = self.W_shape

        if self.dim_ordering == 'th':
            if K._backend == 'theano':
                x = x.reshape([x.shape[0], 1, x.shape[1], x.shape[2], x.shape[3]])
                # x has shape (batchsize, 1, input_nbfilter, input_rows, input_cols)
                # W has shape (nb_filter, input_nbfilter, input_rows, input_cols)
                output = K.sum(x * self.W, axis=2)  # uses broadcasting, sums over input filters
                if stride_row > 1 or stride_col > 1:
                    # sum pooling isn't working -> avg pooling multiplied by number of elements/pool
                    output = (stride_row * stride_col) * K.pool2d(
                        output, (stride_row, stride_col), (stride_row, stride_col),
                        pool_mode='avg')
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        if self.bias:
            if self.dim_ordering == 'th':
                output += K.reshape(self.b, (1, nb_filter, self.output_row, self.output_col))
            elif self.dim_ordering == 'tf':
                output += K.reshape(self.b, (1, self.output_row, self.output_col, nb_filter))
            else:
                raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        output = self.activation(output)
        return output
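
The comment in the snippet above points at a useful identity: K.pool2d only offers 'max' and 'avg' modes, but sum pooling over an s_r x s_c window equals average pooling scaled by s_r * s_c. A quick numeric check of that identity on a toy channels_last input (the shapes here are assumptions for illustration only):

import numpy as np
from keras import backend as K

x = K.constant(np.arange(16, dtype="float32").reshape(1, 4, 4, 1))

avg = K.pool2d(x, pool_size=(2, 2), strides=(2, 2), pool_mode='avg')
sum_pool = 4.0 * avg  # 2 * 2 elements per window

print(K.eval(sum_pool)[0, :, :, 0])
# [[10. 18.]
#  [42. 50.]]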
Project: NeuralSentenceOrdering    Author: FudanNLP    | project source | file source
def get_output(self, train=False):
        # requires the Theano backend; in old Theano, max_pool_2d lives in
        # theano.tensor.signal.downsample
        from theano.tensor.signal import downsample

        # output = K.pool2d(x=train, pool_size=(self.pool_length, 1),
        #                   border_mode=self.border_mode, pool_mode='max')
        pool_size = (self.pool_length, 1)
        strides = (self.pool_length, 1)
        ignore_border = True
        padding = (0, 0)
        output = downsample.max_pool_2d(train, ds=pool_size, st=strides,
                                          ignore_border=ignore_border,
                                          padding=padding,
                                          mode='max')
        return output
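
Note that the downsample module was deprecated and later removed from Theano. On newer Theano versions the equivalent call is theano.tensor.signal.pool.pool_2d, roughly as sketched below; this is a hedged sketch, and the ws/stride/pad keyword names should be checked against the installed Theano version:

# hedged sketch of the newer Theano pooling API
from theano.tensor.signal.pool import pool_2d

output = pool_2d(train, ws=pool_size, stride=strides,
                 ignore_border=ignore_border, pad=padding, mode='max')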