Python keras.layers.merge module: add() code examples

The following 50 code examples, extracted from open-source Python projects, show how to use keras.layers.merge.add().
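Before the project samples, here is a minimal self-contained sketch of the API (layer names and shapes are chosen for illustration, not taken from any project below): add() takes a list of tensors with identical shapes and returns their element-wise sum.

from keras.layers import Input, Dense
from keras.layers.merge import add
from keras.models import Model

# Two branches with matching output shapes; add() sums them element-wise.
a = Input(shape=(32,))
b = Input(shape=(32,))
x = Dense(16)(a)
y = Dense(16)(b)
summed = add([x, y])  # output shape: (None, 16)
model = Model(inputs=[a, b], outputs=summed)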

Project: keras_detect_tool_wear    Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: shenlan    Author: vector-1127
def _shortcut(input, residual):  # residual shortcut: y = F(x) + x
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)

    return add([shortcut, residual])
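For context, a _shortcut helper like the one above is normally invoked at the end of a residual block. A minimal sketch of such a caller follows; the layer choices are assumptions for illustration, not the project's actual block definition.

def basic_block(filters):
    """Hypothetical residual block that ends by calling _shortcut."""
    def f(input):
        conv = Conv2D(filters, (3, 3), padding='same',
                      kernel_initializer='he_normal')(input)
        conv = BatchNormalization(axis=CHANNEL_AXIS)(conv)
        conv = Activation('relu')(conv)
        residual = Conv2D(filters, (3, 3), padding='same',
                          kernel_initializer='he_normal')(conv)
        return _shortcut(input, residual)  # element-wise sum via add()
    return f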
Project: keras-contrib    Author: farizrahman4u
def __initial_conv_block(input, k=1, dropout=0.0, initial=False):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    # For the initial block, project the input to 16 * k filters with a 1x1 convolution
    # (both 'th' and 'tf' orderings use the same projection here)
    if initial:
        init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)

    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    m = add([init, x])
    return m
Project: keras-resnet    Author: raghakot
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)

    return add([shortcut, residual])
Project: keras_detect_tool_wear    Author: kidozh
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,strides=2,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=4,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=2,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,2,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Project: keras_detect_tool_wear    Author: kidozh
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,2,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,2,padding='same',data_format='channels_last')(out)


    # stride 4 so the pooled shortcut matches the two stride-2 convolutions above
    pooling = MaxPooling2D(pooling_size,strides=4,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Project: rldurak    Author: janEbert
def create_model(self, epsilon):
        """Return a compiled model and the state and action input
        layers with the given epsilon for numerical stability.
        """
        inputs = Input(shape=(self.state_shape,))
        action_input = Input(shape=(self.action_shape,))
        x1 = Dense(self.neurons_per_layer[0], activation='relu')(inputs)
        x1 = Dense(self.neurons_per_layer[1], activation='relu')(x1)
        x2 = Dense(self.neurons_per_layer[1], activation='relu')(action_input)
        x = add([x1, x2])
        for n in self.neurons_per_layer[2:]:
            x = Dense(n, activation='relu')(x)
        outputs = Dense(self.action_shape)(x)

        model = Model(inputs=[inputs, action_input], outputs=outputs)

        assert self.optimizer_choice in ['adam', 'rmsprop']
        if self.optimizer_choice == 'adam':
            opti = Adam(lr=self.alpha, epsilon=epsilon)
        else:
            opti = RMSprop(lr=self.alpha, epsilon=epsilon)
        model.compile(optimizer=opti, loss='mse')
        return model, inputs, action_input
Project: deblocking    Author: yydlmzyz
def create_model(img_height,img_width,img_channel):
    ip = Input(shape=(img_height, img_width,img_channel))
    L_1 = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    L_1 = LeakyReLU(alpha=0.25)(L_1)
    L_2=L_1
    for i in range(3):
        L_2 = residual_block(L_2, 64,3)

    L_3 = Conv2D(64, (3, 3), padding='same',kernel_initializer='glorot_uniform')(L_2)
    L_3 = BatchNormalization(axis=-1)(L_3)
    L_3 = add([L_1,L_3])
    L_4= Conv2D(128, (1, 1), padding='same',kernel_initializer='glorot_uniform')(L_3)
    op = Conv2D(img_channel, (9, 9),padding='same', activation='tanh', kernel_initializer='glorot_uniform')(L_4)

    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer,loss='mean_squared_error', metrics=[psnr,ssim])
    return deblocking
Project: deblocking    Author: yydlmzyz
def create_model(img_height,img_width,img_channel):
    ip = Input(shape=(img_height, img_width,img_channel))
    x_1 = Conv2D(64, (9, 9), padding='same', activation='linear', kernel_initializer='glorot_uniform')(ip)
    x_1 = LeakyReLU(alpha=0.25)(x_1)
    x=x_1
    for i in range(5):#or 15
        x = residual_block(x, 64,3)

    x = Conv2D(64, (3, 3), padding='same',kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = add([x_1,x])

    x=upscale(x)
    op = Conv2D(img_channel, (9, 9),padding='same', activation='tanh', kernel_initializer='glorot_uniform')(x)

    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer,loss='mean_squared_error', metrics=[psnr,ssim])
    return deblocking
Project: deep_ocr    Author: JinpengLI
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)

    return add([shortcut, residual])
Project: deep_ocr    Author: JinpengLI
def __init__(self, **kwargs):
        super(KerasLenetModel, self).__init__(**kwargs)
        norm_shape = self.norm_shape
        self.model = Sequential()
        self.model.add(Convolution2D(32, (3, 3), activation='relu',
                                input_shape=(norm_shape[0], norm_shape[1], 1)))
        self.model.add(Convolution2D(32, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.max_n_label, activation='softmax'))

        # 8. Compile model
        self.model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
Project: head-segmentation    Author: szywind
def unit_2(in_layer, n1=64, n2=64, n3=256, p2=1, d2=1):
    '''
    Shortcut Unit
    :param in_layer: input tensor
    :return: output tensor of the unit
    '''
    x = Conv2D(n1, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(in_layer)
    x = BatchNormalization(momentum=0.95)(x)
    x = Activation('relu')(x)

    x = ZeroPadding2D(padding=(p2, p2))(x)
    x = Conv2D(n2, (3, 3), strides=(1, 1), padding='valid', dilation_rate=(d2, d2), kernel_initializer=he_uniform(), use_bias=False)(x)
    x = BatchNormalization(momentum=0.95)(x)
    x = Activation('relu')(x)

    x = Conv2D(n3, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(x)
    x = BatchNormalization(momentum=0.95)(x)

    x = add([in_layer, x])
    x = Activation('relu')(x)
    return x
Project: skp_edu_docker    Author: TensorMSA
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)

    return add([shortcut, residual])
Project: enet-keras    Author: PavlosMelissinos
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = UpSampling2D(size=(2, 2))(other)

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        decoder = Activation('relu')(decoder)

    return decoder
Project: enet-keras    Author: PavlosMelissinos
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = MaxUnpooling2D()([other, reverse_module])

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        # decoder = Activation('relu')(decoder)
        decoder = PReLU(shared_axes=[1, 2])(decoder)

    return decoder
Project: keras-contrib    Author: farizrahman4u
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input_feature)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input_feature
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        print('reshaping via a convolution...')
        if conv_name_base is not None:
            conv_name_base = conv_name_base + '1'
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001),
                          name=conv_name_base)(input_feature)
        if bn_name_base is not None:
            bn_name_base = bn_name_base + '1'
        shortcut = BatchNormalization(axis=CHANNEL_AXIS, name=bn_name_base)(shortcut)

    return add([shortcut, residual])
Project: keras-contrib    Author: farizrahman4u
def __conv2_block(input, k=1, dropout=0.0):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    # Check if input number of filters is same as 16 * k, else create convolution2d for this input
    if K.image_dim_ordering() == 'th':
        if init._keras_shape[1] != 16 * k:
            init = Conv2D(16 * k, (1, 1), activation='linear', padding='same')(init)
    else:
        if init._keras_shape[-1] != 16 * k:
            init = Conv2D(16 * k, (1, 1), activation='linear', padding='same')(init)

    x = Conv2D(16 * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Conv2D(16 * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    m = add([init, x])
    return m
Project: keras-contrib    Author: farizrahman4u
def __conv3_block(input, k=1, dropout=0.0):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    # Check if input number of filters is same as 32 * k, else create convolution2d for this input
    if K.image_dim_ordering() == 'th':
        if init._keras_shape[1] != 32 * k:
            init = Conv2D(32 * k, (1, 1), activation='linear', padding='same')(init)
    else:
        if init._keras_shape[-1] != 32 * k:
            init = Conv2D(32 * k, (1, 1), activation='linear', padding='same')(init)

    x = Conv2D(32 * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Conv2D(32 * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    m = add([init, x])
    return m
Project: keras-contrib    Author: farizrahman4u
def ___conv4_block(input, k=1, dropout=0.0):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    # Check if input number of filters is same as 64 * k, else create convolution2d for this input
    if K.image_dim_ordering() == 'th':
        if init._keras_shape[1] != 64 * k:
            init = Conv2D(64 * k, (1, 1), activation='linear', padding='same')(init)
    else:
        if init._keras_shape[-1] != 64 * k:
            init = Conv2D(64 * k, (1, 1), activation='linear', padding='same')(init)

    x = Conv2D(64 * k, (3, 3), padding='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Conv2D(64 * k, (3, 3), padding='same')(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    m = add([init, x])
    return m
Project: keras_detect_tool_wear    Author: kidozh
def build_main_residual_network(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    inp = Input(shape=(time_step,input_dim))

    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)


    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh
def build_2d_main_residual_network(batch_size,
                                width,
                                height,
                                channel_size,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    inp = Input(shape=(width,height,channel_size))

    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)


    out = Conv2D(128,5,data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_2d_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_2d_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh
def build_main_residual_network_with_lstm(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                rnn_layer_num = 2,
                                dropout=0.3):
    inp = Input(shape=(time_step,input_dim))

    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)

    # add LSTM module
    for _ in range(rnn_layer_num):
        out = LSTM(128,return_sequences=True)(out)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh
def build_2d_main_residual_network(batch_size,
                                width,
                                height,
                                channel_size,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    inp = Input(shape=(width,height,channel_size))

    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)


    out = Conv2D(128,5,data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_2d_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_2d_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh
def build_main_residual_network_with_lstm(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                rnn_layer_num = 2,
                                dropout=0.3):
    inp = Input(shape=(time_step,input_dim))

    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)

    # add LSTM module
    for _ in range(rnn_layer_num):
        out = LSTM(128,return_sequences=True)(out)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh
def build_2d_main_residual_network(batch_size,
                                width,
                                height,
                                channel_size,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    inp = Input(shape=(width,height,channel_size))

    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)


    out = Conv2D(128,5,data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_2d_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_2d_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: thaanaOCR    Author: Sofwath
def on_epoch_end(self, epoch, logs={}):

        font = {'family': 'Thaana Unicode Akeh',
                'color':  'darkred',
                'weight': 'normal',
                'size': 12,
        }

        self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))

        # self.model.save('model.h5')  # creates an HDF5 file

        self.show_edit_distance(256)
        self.num_display_words = 6 
        word_batch = next(self.text_img_gen)[0]
        # add visual augmentations 
        res = decode_batch(self.test_func, word_batch['the_input'][0:6])
        if word_batch['the_input'][0].shape[0] < 256:
            cols = 2
        else:
            cols = 1
        for i in range(self.num_display_words):
            plt.subplot(self.num_display_words // cols, cols, i + 1)

            if K.image_data_format() == 'channels_first':
                the_input = word_batch['the_input'][i, 0, :, :]
            else:
                the_input = word_batch['the_input'][i, :, :, 0]
            plt.imshow(the_input.T, cmap='Greys_r')
            plt.xlabel('Actual = \'%s\'\nPrediction = \'%s\'' % (word_batch['source_str'][i][::-1], res[i][::-1]), fontdict=font)
            # print('Actual = \'%s\'\nPrediction = \'%s\'' % (word_batch['source_str'][i], res[i]))
        fig = plt.gcf()

        fig.set_size_inches(10, 13)
        plt.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))

        plt.close()
Project: pic2vec    Author: datarobot
def test_splice_layer():
    """Test method splices tensors correctly"""
    # Create spliced and added layers via splicing function
    list_of_spliced_layers = _splice_layer(SPLICING_TENSOR, 3)
    # Add each of the layers together
    x = add(list_of_spliced_layers)
    # Create the spliced and added layers by hand
    check_layer = K.constant(9, shape=(3, 4))
    # Check the math
    assert np.allclose(K.eval(check_layer), K.eval(x), atol=ATOL)
Project: deblocking    Author: yydlmzyz
def residual_block(input,filters,kernel_size):
    conv_1 = Conv2D(filters, (kernel_size, kernel_size), padding='same',kernel_initializer='glorot_uniform')(input)
    norm_1 = BatchNormalization(axis=-1)(conv_1)
    relu_1 = LeakyReLU(alpha=0.25)(norm_1)
    conv_2 = Conv2D(filters, (kernel_size, kernel_size), padding='same',kernel_initializer='glorot_uniform')(relu_1)
    norm_2 = BatchNormalization(axis=-1)(conv_2)
    return add([input, norm_2])
Project: deblocking    Author: yydlmzyz
def residual_block(input,filters,kernel_size):
    conv_1 = Conv2D(filters, (kernel_size, kernel_size), padding='same',kernel_initializer='glorot_uniform')(input)
    norm_1 = BatchNormalization(axis=-1)(conv_1)
    relu_1 = LeakyReLU(alpha=0.25)(norm_1)
    conv_2 = Conv2D(filters, (kernel_size, kernel_size), padding='same',kernel_initializer='glorot_uniform')(relu_1)
    norm_2 = BatchNormalization(axis=-1)(conv_2)
    return add([input, norm_2])
Project: deblocking    Author: yydlmzyz
def residual_block(input,filters,kernel_size):
    conv_1 = Conv2D(filters, (kernel_size, kernel_size), padding='same',kernel_initializer='glorot_uniform')(input)
    norm_1 = BatchNormalization(axis=-1)(conv_1)
    relu_1 = LeakyReLU(alpha=0.25)(norm_1)
    conv_2 = Conv2D(filters, (kernel_size, kernel_size), padding='same',kernel_initializer='glorot_uniform')(relu_1)
    norm_2 = BatchNormalization(axis=-1)(conv_2)
    return add([input, norm_2])
Project: deep_ocr    Author: JinpengLI
def __init__(self, **kwargs):
        super(KerasLenetExt1Model, self).__init__(**kwargs)
        norm_shape = self.norm_shape
        self.model = Sequential()
        self.model.add(Convolution2D(64, (3, 3), activation='relu',
                                input_shape=(norm_shape[0], norm_shape[1], 1)))

        self.model.add(Convolution2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(1024, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.max_n_label, activation='softmax'))

        # 8. Compile model
        self.model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
Project: deep_ocr    Author: JinpengLI
def __init__(self, **kwargs):
        self.final_fc_nm = kwargs.get("final_fc_nm", 2048)
        if "final_fc_nm" in kwargs:
            kwargs.pop("final_fc_nm")
        super(KerasLenetExt2Model, self).__init__(**kwargs)
        norm_shape = self.norm_shape
        self.model = Sequential()
        self.model.add(Convolution2D(64, (3, 3), activation='relu',
                                input_shape=(norm_shape[0], norm_shape[1], 1)))

        self.model.add(Convolution2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(self.final_fc_nm, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.max_n_label, activation='softmax'))

        # 8. Compile model
        self.model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
Project: deep_ocr    Author: JinpengLI
def __init__(self, **kwargs):
        super(KerasCifar10CNN, self).__init__(**kwargs)
        norm_shape = self.norm_shape
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same',
                         input_shape=(norm_shape[0], norm_shape[1], 1)))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3), ))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2),))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding='same', ))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3), ))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2),))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.max_n_label))
        model.add(Activation('softmax'))
        # initiate RMSprop optimizer
        opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

        # Let's train the model using RMSprop
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
              metrics=['accuracy'])
        self.model = model
Project: head-segmentation    Author: szywind
def unit_1(in_layer, n1=64, n2=64, n3=256, s2=1, p2=1, d2=1):
    '''
    Two-Branch Unit
    :param in_layer: input tensor
    :return: output tensor of the unit
    '''
    # branch 1
    x1 = Conv2D(n1, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(in_layer)
    x1 = BatchNormalization(momentum=0.95)(x1)
    x1 = Activation('relu')(x1)

    x1 = ZeroPadding2D(padding=(p2, p2))(x1)
    x1 = Conv2D(n2, (3, 3), strides=(s2, s2), padding='valid', dilation_rate=(d2, d2), kernel_initializer=he_uniform(), use_bias=False)(x1)
    x1 = BatchNormalization(momentum=0.95)(x1)
    x1 = Activation('relu')(x1)

    x1 = Conv2D(n3, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(x1)
    x1 = BatchNormalization(momentum=0.95)(x1)

    # branch 2
    x2 = Conv2D(n3, (1, 1), strides=(s2, s2), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(in_layer)
    x2 = BatchNormalization(momentum=0.95)(x2)

    x = add([x1, x2])
    x = Activation('relu')(x)
    return x
Project: EEDS-keras    Author: MarkPrecursor
def model_EEDS():
    _input = Input(shape=(None, None, 1), name='input')
    _EES = EES.model_EES()(_input)
    _EED = EED.model_EED()(_input)
    _EEDS = add(inputs=[_EED, _EES])

    model = Model(inputs=_input, outputs=_EEDS)
    Adam = adam(lr=0.0003)
    model.compile(optimizer=Adam, loss='mse')
    return model
Project: EEDS-keras    Author: MarkPrecursor
def Res_block():
    _input = Input(shape=(None, None, 64))

    conv = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(_input)
    conv = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='linear')(conv)

    out = add(inputs=[_input, conv])
    out = Activation('relu')(out)

    model = Model(inputs=_input, outputs=out)

    return model
Project: keras-fcn    Author: JihongJu
def vgg_upsampling(classes, target_shape=None, scale=1, weight_decay=0., block_name='featx'):
    """A VGG convolutional block with bilinear upsampling for decoding.

    :param classes: Integer, number of classes
    :param scale: Float, scale factor to the input feature, varying from 0 to 1
    :param target_shape: 4D tuple with target_height, target_width as
    the 2nd, 3rd elements if `channels_last` or as the 3rd, 4th elements if
    `channels_first`.

    >>> from keras_fcn.blocks import vgg_upsampling
    >>> feat1, feat2, feat3 = feat_pyramid[:3]
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 14, 14, None),
    >>>                    scale=1, block_name='feat1')(feat1, None)
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 28, 28, None),
    >>>                    scale=1e-2, block_name='feat2')(feat2, y)
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 224, 224, None),
    >>>                    scale=1e-4, block_name='feat3')(feat3, y)

    """
    def f(x, y):
        score = Conv2D(filters=classes, kernel_size=(1, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(weight_decay),
                       name='score_{}'.format(block_name))(x)
        if y is not None:
            def scaling(xx, ss=1):
                return xx * ss
            scaled = Lambda(scaling, arguments={'ss': scale},
                            name='scale_{}'.format(block_name))(score)
            score = add([y, scaled])
        upscore = BilinearUpSampling2D(
            target_shape=target_shape,
            name='upscore_{}'.format(block_name))(score)
        return upscore
    return f
Project: enet-keras    Author: PavlosMelissinos
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                            # padding='same',
                            strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other = MaxPooling2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
Project: enet-keras    Author: PavlosMelissinos
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise(Exception('You shouldn\'t be here'))

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    other = inp
    # other branch
    if downsample:
        other, indices = MaxPoolingWithArgmax2D()(other)

        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    else:
        return encoder
Project: keras_detect_tool_wear    Author: kidozh
def build_multi_input_main_residual_network(batch_size,
                                a2_time_step,
                                d2_time_step,
                                d1_time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    '''
    A multi-input residual network for wavelet-transformed inputs.
    :param batch_size: batch size (not used when building the graph)
    :param a2_time_step: a2_size
    :param d2_time_step: d2_size
    :param d1_time_step: d1_size
    :param input_dim: input_dim
    :param output_dim: output_dim
    :param loop_depth: depth of the residual network
    :param dropout: dropout rate
    :return: a compiled Keras model
    '''
    a2_inp = Input(shape=(a2_time_step,input_dim),name='a2')
    d2_inp = Input(shape=(d2_time_step,input_dim),name='d2')
    d1_inp = Input(shape=(d1_time_step,input_dim),name='d1')

    out = concatenate([a2_inp,d2_inp,d1_inp],axis=1)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inputs=[a2_inp,d2_inp,d1_inp],outputs=[out])

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
Project: keras_detect_tool_wear    Author: kidozh
def build_main_residual_network_with_lstm(batch_size,
                                time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                rnn_layer_num = 2,
                                dropout=0.3):


    inp = Input(shape=(time_step,input_dim))



    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)

    # add LSTM module
    for _ in range(rnn_layer_num):
        out = LSTM(128,return_sequences=True)(out)



    out = Conv1D(128,5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model