Python tensorflow.contrib.layers module: l2_regularizer() example source code

We extracted the following code examples from open-source Python projects to illustrate how to use tensorflow.contrib.layers.l2_regularizer().
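Before the project examples, a minimal usage sketch (TensorFlow 1.x; the tensor names and the 1e-4 scale are illustrative): l2_regularizer(scale) returns a function that maps a weight tensor w to scale * sum(w**2) / 2, and contrib layers that accept weights_regularizer record the resulting penalty in the tf.GraphKeys.REGULARIZATION_LOSSES collection rather than applying it automatically:

import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, shape=[None, 128], name='x')

# Attach an L2 penalty to this layer's weight matrix.
net = layers.fully_connected(
    x,
    num_outputs=10,
    activation_fn=None,
    weights_regularizer=layers.l2_regularizer(scale=1e-4))

# The collected penalties must be added to the objective explicitly.
data_loss = tf.reduce_mean(tf.square(net))  # stand-in objective
total_loss = data_loss + tf.losses.get_regularization_loss()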

Project: various_residual_networks | Author: yuhui-lin
def BN_ReLU(self, net):
        """Batch Normalization and ReLU."""
        # 'gamma' is not used as the next layer is ReLU
        net = batch_norm(net,
                         center=True,
                         scale=False,
                         activation_fn=tf.nn.relu, )
        self._activation_summary(net)
        return net

def conv2d(self, net, num_ker, ker_size, stride):
        # 1D-convolution (a [ker_size, 1] kernel applied via convolution2d)
        net = convolution2d(
            net,
            num_outputs=num_ker,
            kernel_size=[ker_size, 1],
            stride=[stride, 1],
            padding='SAME',
            activation_fn=None,
            normalizer_fn=None,
            weights_initializer=variance_scaling_initializer(),
            weights_regularizer=l2_regularizer(self.weight_decay),
            biases_initializer=tf.zeros_initializer())
        return net
Project: lsdc | Author: febert
def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    self.device_assigner = (
        device_assigner or tensor_forest.RandomForestDeviceAssigner())

    self.params = params

    self.optimizer = optimizer_class(self.params.learning_rate)

    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
      self.regularizer = layers.l1_regularizer(
          self.params.regularization_strength)
    elif params.regularization == "l2":
      self.regularizer = layers.l2_regularizer(
          self.params.regularization_strength)
Project: deeplearning | Author: fanfanfeng
def __init__(self):
        self.embeddingSize = ner_tv.flags.embedding_dim
        self.distinctTagNum = ner_tv.flags.tags_num
        self.numHidden = ner_tv.flags.hidden_neural_size
        self.c2v = load_word2Vec(ner_tv.word2vec_path)
        self.words = tf.Variable(self.c2v,name = 'words')
        self.sentence_length = ner_tv.flags.sentence_length
        self.initial_learning_rate = ner_tv.flags.initial_learning_rate

        with tf.variable_scope('Softmax') as scope:
            self.W = tf.get_variable(shape=[self.numHidden *2,self.distinctTagNum],
                                     initializer=tf.truncated_normal_initializer(stddev=0.01),
                                     name='weights',
                                     regularizer= l2_regularizer(0.001))
            self.b = tf.Variable(tf.zeros([self.distinctTagNum]), name='bias')
        self.trains_params = None
        self.inp = tf.placeholder(tf.int32,shape=[None,self.sentence_length],name='input_placeholder')

        self.model_save_path = ner_tv.training_model_bi_lstm
        self.saver = tf.train.Saver()
Project: tensorflow-litterbox | Author: rwightman
def resnet_arg_scope(
        weight_decay=0.0001,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True,
):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
    }
    l2_regularizer = layers.l2_regularizer(weight_decay)

    arg_scope_layers = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(),
        weights_regularizer=l2_regularizer,
        activation_fn=tf.nn.relu)
    arg_scope_conv = arg_scope(
        [layers.conv2d, my_layers.preact_conv2d],
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params)
    with arg_scope_layers, arg_scope_conv as arg_sc:
        return arg_sc
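For context, a scope built this way is consumed with arg_scope as a context manager; a hedged sketch (images is a hypothetical input batch) in which conv2d picks up the variance-scaling initializer, L2 regularizer, ReLU, and batch-norm defaults configured above:

with arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    net = layers.conv2d(images, num_outputs=64, kernel_size=3, stride=2)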
Project: web_page_classification | Author: yuhui-lin
def conv1d(self, net, num_ker, ker_size, stride):
        # 1D-convolution
        net = convolution2d(
            net,
            num_outputs=num_ker,
            kernel_size=[ker_size, 1],
            stride=[stride, 1],
            padding='SAME',
            activation_fn=None,
            normalizer_fn=None,
            weights_initializer=variance_scaling_initializer(),
            weights_regularizer=l2_regularizer(self.weight_decay),
            biases_initializer=tf.zeros_initializer())
        return net
Project: class-activation-mapping | Author: markdtw
def get_conv_var(self, f_size, in_c, out_c, name):
        if name in self.params.keys():
            w_initializer = tf.constant_initializer(self.params[name][0].transpose((2, 3, 1, 0)))
            b_initializer = tf.constant_initializer(self.params[name][1])
        else:
            b_initializer = w_initializer = xavier_initializer()
        f = tf.get_variable(name+'_f', [f_size, f_size, in_c, out_c],
                initializer=w_initializer, regularizer=l2_regularizer(self.l2_beta))
        b = tf.get_variable(name+'_b', [out_c], initializer=b_initializer)
        return f, b
Project: class-activation-mapping | Author: markdtw
def get_fc_var(self, in_size, out_size, name):
        if name in self.params.keys():
            w_initializer = tf.constant_initializer(self.params[name][0].transpose((1, 0)))
            b_initializer = tf.constant_initializer(self.params[name][1])
        else:
            b_initializer = w_initializer = xavier_initializer()
        w = tf.get_variable(name+'_w', [in_size, out_size],
                initializer=w_initializer, regularizer=l2_regularizer(self.l2_beta))
        b = tf.get_variable(name+'_b', [out_size], initializer=b_initializer)
        return w, b
Project: various_residual_networks | Author: yuhui-lin
def conv2d(self, net, num_ker, ker_size, stride):
        net = convolution2d(
            net,
            num_outputs=num_ker,
            kernel_size=[ker_size, ker_size],
            stride=[stride, stride],
            padding='SAME',
            activation_fn=None,
            normalizer_fn=None,
            weights_initializer=variance_scaling_initializer(),
            weights_regularizer=l2_regularizer(FLAGS.weight_decay),
            biases_initializer=tf.zeros_initializer())
        return net
Project: deeplearning | Author: fanfanfeng
def __init__(self,embeddingSize,distinctTagNum,c2vPath,numHidden):
        self.embeddingSize = embeddingSize
        self.distinctTagNum = distinctTagNum
        self.numHidden = numHidden
        self.c2v = self.c2v(c2vPath)
        self.words = tf.Variable(self.c2v,name = 'words')
        with tf.variable_scope('Softmax') as scope:
            self.W = tf.get_variable(shape=[numHidden *2,distinctTagNum],
                                     initializer=tf.truncated_normal_initializer(stddev=0.01),
                                     name='weights',
                                     regularizer= l2_regularizer(0.001))
            self.b = tf.Variable(tf.zeros([distinctTagNum]), name='bias')
        self.trains_params = None
        self.inp = tf.placeholder(tf.int32,shape=[None,nlp_segment.flags.max_sentence_len],name='input_placeholder')
Project: deeplearning | Author: fanfanfeng
def __init__(self):
        self.embeddingSize = nlp_segment.flags.embedding_size
        self.num_tags = nlp_segment.flags.num_tags
        self.num_hidden = nlp_segment.flags.num_hidden
        self.learning_rate = nlp_segment.flags.learning_rate
        self.batch_size = nlp_segment.flags.batch_size
        self.model_save_path = nlp_segment.model_save_path

        self.input = tf.placeholder(tf.int32,
                                  shape=[None, FLAGS.max_sentence_len],
                                  name="input_placeholder")

        self.label = tf.placeholder(tf.int32,
                                    shape=[None, FLAGS.max_sentence_len],
                                    name="label_placeholder")
        self.dropout = tf.placeholder(tf.float32,name="dropout")

        with tf.name_scope("embedding_layer"):
            self.word_embedding = tf.Variable(data_loader.load_w2v(nlp_segment.word_vec_path), name="word_embedding")
            inputs_embed = tf.nn.embedding_lookup(self.word_embedding,self.input)
            length = self.length(self.input)
            self.length_64 = tf.cast(length, tf.int64)
            reuse = None #if self.trainMode else True


            # if trainMode:
            #  word_vectors = tf.nn.dropout(word_vectors, 0.5)
            with tf.name_scope("rnn_fwbw") as scope:
                lstm_fw = rnn.LSTMCell(self.num_hidden,use_peepholes=True)
                lstm_bw = rnn.LSTMCell(self.num_hidden,use_peepholes=True)

                inputs = tf.unstack(inputs_embed, nlp_segment.flags.max_sentence_len, 1)
                outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw, lstm_bw, inputs, sequence_length=self.length_64,
                                                            dtype=tf.float32)
            output = tf.reshape(outputs, [-1, self.num_hidden * 2])
            #if self.trainMode:
            output = tf.nn.dropout(output, self.dropout)

        with tf.variable_scope('Softmax') as scope:
            self.W = tf.get_variable(shape=[self.num_hidden * 2, self.num_tags],
                                     initializer=tf.truncated_normal_initializer(stddev=0.01),
                                     name='weights',
                                     regularizer=l2_regularizer(0.001))
            self.b = tf.Variable(tf.zeros([self.num_tags]), name='bias')
            matricized_unary_scores = tf.matmul(output, self.W) + self.b
            # matricized_unary_scores = tf.nn.log_softmax(matricized_unary_scores)
            self.unary_scores = tf.reshape(
                matricized_unary_scores,
                [-1, FLAGS.max_sentence_len, self.num_tags])
        with tf.name_scope("crf"):
            # CRF transition matrix; self.initializer was undefined in this
            # snippet, so use the same truncated-normal init as the weights.
            self.transition_params = tf.get_variable(
                "transitions",
                shape=[self.num_tags, self.num_tags],
                initializer=tf.truncated_normal_initializer(stddev=0.01))
            log_likelihood, self.transition_params = crf.crf_log_likelihood(self.unary_scores, self.label, self.length_64,self.transition_params)
        self.loss = tf.reduce_mean(-log_likelihood)
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        self.saver = tf.train.Saver()
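Worth noting: the regularizer=l2_regularizer(0.001) attached to the Softmax weights above only records the penalty in tf.GraphKeys.REGULARIZATION_LOSSES; the loss here is the bare negative log-likelihood, so the L2 term takes effect only if the collected losses are folded into the objective. A minimal sketch of that step, using the names from this snippet:

reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if reg_losses:
    self.loss += tf.add_n(reg_losses)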
Project: TradingTensors | Author: Henry-bee
def build_q_network(self, hiddens):

        out = self._inputs

        for hidden in hiddens:
            out = layers.fully_connected(
                inputs=out,
                num_outputs=hidden,
                activation_fn=tf.tanh,
                weights_regularizer=layers.l2_regularizer(scale=0.1))
            out = tf.nn.dropout(out, self.keep_prob)

        self.Q_t = layers.fully_connected(out, self.num_actions, activation_fn=None)
        self.Q_action = tf.argmax(self.Q_t, dimension=1)
Project: tensorflow-litterbox | Author: rwightman
def vgg_arg_scope(
        weight_decay=0.0005,
        use_batch_norm=False):
    """"""
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.9997,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
    }
    normalizer_fn = layers.batch_norm if use_batch_norm else None
    normalizer_params = batch_norm_params if use_batch_norm else None
    l2_regularizer = layers.l2_regularizer(weight_decay)

    with arg_scope(
            [layers.fully_connected],
            biases_initializer=tf.constant_initializer(0.1),
            weights_initializer=layers.variance_scaling_initializer(factor=1.0),
            weights_regularizer=l2_regularizer,
            activation_fn=tf.nn.relu):
        with arg_scope(
                [layers.conv2d],
                normalizer_fn=normalizer_fn,
                normalizer_params=normalizer_params,
                weights_initializer=layers.variance_scaling_initializer(factor=1.0),
                weights_regularizer=l2_regularizer,
                activation_fn=tf.nn.relu) as arg_sc:
            return arg_sc
Project: tensorflow-litterbox | Author: rwightman
def inception_arg_scope(
        weight_decay=0.00004,
        use_batch_norm=True,
        batch_norm_decay=0.9997,
        batch_norm_epsilon=0.001,
):
    # Parameters for BatchNorm.
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
    }
    if use_batch_norm:
        normalizer_fn = layers.batch_norm
        normalizer_params = batch_norm_params
    else:
        normalizer_fn = None
        normalizer_params = {}
    # Set weight_decay for weights in Conv and FC layers.
    l2_regularizer = layers.l2_regularizer(weight_decay)
    activation_fn = tf.nn.relu  # tf.nn.elu

    arg_scope_weights = arg_scope(
        [layers.conv2d, layers.fully_connected],
        weights_initializer=layers.variance_scaling_initializer(factor=1.0),
        weights_regularizer=l2_regularizer
    )
    arg_scope_conv = arg_scope(
        [layers.conv2d],
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params
    )
    with arg_scope_weights, arg_scope_conv as arg_sc:
        return arg_sc
Project: EDSR | Author: iwtw
def conv2d( inputs , outputs_dim , kernel_size ,   stride ,   padding = "SAME" , he_init = False , activation_fn = None , regularization_scale = 0.0  ): 
    C = inputs.get_shape()[-1].value
    fan_in = C * kernel_size**2
    fan_out = C * kernel_size**2 / stride**2
    avg_fan = (fan_in + fan_out) / 2
    if he_init:
        var = 2.0/avg_fan
    else :
        var = 1.0/avg_fan
    # var = (b - a)**2 / 12 , b==-a ,  (zero mean)
    upper_bound = np.sqrt( 12.0*var ) * 0.5 
    weights_initializer = tf.random_uniform_initializer( -upper_bound , upper_bound , seed = None , dtype = tf.float32 )
    weights_regularizer = layers.l2_regularizer( scale = regularization_scale )
    return layers.conv2d(inputs=inputs, num_outputs=outputs_dim, kernel_size=kernel_size,
                         stride=stride, padding=padding, activation_fn=activation_fn,
                         weights_initializer=weights_initializer,
                         weights_regularizer=weights_regularizer)
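A usage sketch for this wrapper (the input shape and hyperparameters are hypothetical): with he_init=True the uniform bound works out to sqrt(12 * 2 / avg_fan) / 2 = sqrt(6 / avg_fan), the usual He uniform limit, and the L2 penalty is handed through to layers.conv2d:

inputs = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
net = conv2d(inputs, outputs_dim=64, kernel_size=3, stride=1,
             he_init=True, regularization_scale=1e-4)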
Project: EDSR | Author: iwtw
def fully_connected( inputs , outputs_dim ,  he_init = False , activation_fn = None , regularization_scale = 0.0   ):
    x = layers.flatten( inputs )
    fan_in = x.get_shape()[-1].value
    fan_out = outputs_dim
    avg_fan = ( fan_in + fan_out ) / 2 
    if he_init:
        var = 2.0/avg_fan
    else:
        var = 1.0/avg_fan
    # var = (b - a)**2 / 12 , b==-a ,  (zero mean)
    upper_bound = np.sqrt( 12.0 * var ) *0.5
    weights_initializer = tf.random_uniform_initializer( -upper_bound , upper_bound , seed = None , dtype = tf.float32 )
    weights_regularizer = layers.l2_regularizer( scale = regularization_scale )
    return layers.fully_connected( x , outputs_dim , weights_initializer =  weights_initializer , activation_fn = activation_fn   ,   weights_regularizer = weights_regularizer )
Project: EDSR | Author: iwtw
def conv2d_transpose( inputs , outputs_dim , kernel_size , stride , padding = "SAME" , he_init = False , activation_fn = None , regularization_scale = 0.0   ):
    C = inputs.get_shape()[-1].value
    fan_in = C * kernel_size**2 / stride**2
    fan_out = C * kernel_size**2 
    avg_fan = ( fan_in + fan_out ) / 2 
    if he_init:
        var = 2.0/avg_fan
    else :
        var = 1.0/avg_fan
    # var = ( b - a )**2 /12 , b==-a , (zero mean)
    upper_bound = np.sqrt( 12.0 * var ) *0.5
    weights_initializer = tf.random_uniform_initializer( -upper_bound , upper_bound , seed = None , dtype = tf.float32 )
    weights_regularizer = layers.l2_regularizer( scale = regularization_scale )
    return layers.conv2d_transpose( inputs , outputs_dim , kernel_size = kernel_size , stride = stride , padding = padding ,  weights_initializer = weights_initializer ,  activation_fn = activation_fn , weights_regularizer = weights_regularizer )
Project: various_residual_networks | Author: yuhui-lin
def resnn(self, image_batch):
        """Build the resnn model.
        Args:
            image_batch: Sequences returned from inputs_train() or inputs_eval().
        Returns:
            Logits.
        """
        # First convolution
        with tf.variable_scope('conv_layer1'):
            net = self.conv2d(image_batch, self.groups[0].num_ker, 5, 1)
            net = self.BN_ReLU(net)

        # Max pool
        if FLAGS.max_pool:
            net = tf.nn.max_pool(net,
                                 [1, 3, 3, 1],
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')

        # stacking Residual Units
        for group_i, group in enumerate(self.groups):
            for unit_i in range(group.num_units):
                net = self.residual_unit(net, group_i, unit_i)

        # an extra activation before average pooling
        if FLAGS.special_first:
            with tf.variable_scope('special_BN_ReLU'):
                net = self.BN_ReLU(net)

        # padding should be VALID for global average pooling
        # output: batch*1*1*channels
        net_shape = net.get_shape().as_list()
        net = tf.nn.avg_pool(net,
                             ksize=[1, net_shape[1], net_shape[2], 1],
                             strides=[1, 1, 1, 1],
                             padding='VALID')

        net_shape = net.get_shape().as_list()
        softmax_len = net_shape[1] * net_shape[2] * net_shape[3]
        net = tf.reshape(net, [-1, softmax_len])

        # add dropout
        if FLAGS.dropout:
            with tf.name_scope("dropout"):
                net = tf.nn.dropout(net, FLAGS.dropout_keep_prob)

        # 2D fully connected neural network
        with tf.variable_scope('FC-layer'):
            net = fully_connected(
                net,
                num_outputs=FLAGS.num_cats,
                activation_fn=None,
                normalizer_fn=None,
                weights_initializer=variance_scaling_initializer(),
                weights_regularizer=l2_regularizer(FLAGS.weight_decay),
                biases_initializer=tf.zeros_initializer())

        return net
Project: deeplearning | Author: fanfanfeng
def __init__(self):
        self.embedding_size = nlp_segment.flags.embedding_size
        self.num_tags = nlp_segment.flags.num_tags
        self.num_hidden = nlp_segment.flags.num_hidden
        self.learning_rate = nlp_segment.flags.learning_rate
        self.sentence_length = nlp_segment.flags.max_sentence_len
        self.word2vec_path = nlp_segment.word_vec_path

        self.model_save_path = nlp_segment.model_save_path
        self.hidden_layer_num = 1
        self.max_grad_norm = nlp_segment.flags.max_grad_norm

        self.input_x = tf.placeholder(dtype=tf.int32,shape=[None,self.sentence_length],name="input_x")
        self.labels = tf.placeholder(dtype=tf.int32,shape=[None,self.sentence_length],name='label')
        self.lengths = tf.placeholder(dtype=tf.int32,shape=[None],name='lengths')
        self.dropout = tf.placeholder(dtype=tf.float32,name='dropout')

        with tf.name_scope("embedding_layer"):
            self.word_embedding = tf.Variable(data_loader.load_w2v(),name="word_embedding")
            inputs_embed = tf.nn.embedding_lookup(self.word_embedding,self.input_x)
            # Unstack the (batch_size, n_steps, n_input) embeddings along the time
            # axis into n_steps tensors of shape (batch_size, n_input), the input
            # format the static bidirectional LSTM below expects.
            inputs_embed = tf.unstack(inputs_embed, self.sentence_length, 1)

        features = self.bi_lstm_layer(inputs_embed)

        with tf.variable_scope('Softmax') as scope:
            self.W = tf.get_variable(shape=[self.num_hidden *2,self.num_tags],
                                     initializer=tf.truncated_normal_initializer(stddev=0.01),
                                     name='weights',
                                     regularizer= l2_regularizer(0.001))
            self.b = tf.Variable(tf.zeros([self.num_tags]), name='bias')

            scores = tf.matmul(features,self.W) + self.b
            self.scores = tf.reshape(scores, [-1, self.sentence_length, self.num_tags])

        with tf.name_scope("crf"):
            log_likelihood,self.trans_form = crf.crf_log_likelihood(self.scores,self.labels,self.lengths)

        with tf.name_scope("output"):
            self.loss = tf.reduce_mean(-1.0 * log_likelihood)

        self.global_step = tf.Variable(0,name="global_step",trainable=False)
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)

        t_vars = tf.trainable_variables()
        grads,_ = tf.clip_by_global_norm(tf.gradients(self.loss,t_vars),self.max_grad_norm)
        self.trans_op = self.optimizer.apply_gradients(zip(grads,t_vars),self.global_step)
        self.saver = tf.train.Saver()