Python tensorflow module: mul() code examples

The following 50 code examples, drawn from open-source Python projects, illustrate how to use tensorflow.mul().
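Note: tf.mul (together with tf.sub and tf.neg) was removed in TensorFlow 1.0 in favor of tf.multiply (tf.subtract, tf.negative), so the snippets below target TensorFlow 0.x. A minimal compatibility sketch for replaying them on a newer release:

import tensorflow as tf

# tf.mul was removed in TensorFlow 1.0; alias it to tf.multiply so the
# tf.mul calls in the snippets below keep working on newer releases.
if not hasattr(tf, 'mul'):
    tf.mul = tf.multiply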

Project: GELUs    Author: hendrycks    | project source | file source
def ae(x):
    if nonlinearity_name == 'relu':
        f = tf.nn.relu
    elif nonlinearity_name == 'elu':
        f = tf.nn.elu
    elif nonlinearity_name == 'gelu':
        # def gelu(x):
        #     return tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.)
        # f = gelu
        def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
        f = gelu_fast
    elif nonlinearity_name == 'silu':
        def silu(_x):
            return _x * tf.sigmoid(_x)
        f = silu
    # elif nonlinearity_name == 'soi':
    #     def soi_map(x):
    #         u = tf.random_uniform(tf.shape(x))
    #         mask = tf.to_float(tf.less(u, (1 + tf.erf(x / tf.sqrt(2.))) / 2.))
    #         return tf.cond(is_training, lambda: tf.mul(mask, x),
    #                        lambda: tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.))
    #     f = soi_map

    else:
        raise NameError("Need 'relu', 'elu', 'gelu', or 'silu' for nonlinearity_name")

    h1 = f(tf.matmul(x, W['1']) + b['1'])
    h2 = f(tf.matmul(h1, W['2']) + b['2'])
    h3 = f(tf.matmul(h2, W['3']) + b['3'])
    h4 = f(tf.matmul(h3, W['4']) + b['4'])
    h5 = f(tf.matmul(h4, W['5']) + b['5'])
    h6 = f(tf.matmul(h5, W['6']) + b['6'])
    h7 = f(tf.matmul(h6, W['7']) + b['7'])
    return tf.matmul(h7, W['8']) + b['8']
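The commented-out gelu above is the exact form x * Phi(x) written via erfc; gelu_fast is the tanh approximation. A quick NumPy sanity check of how closely the two agree (a sketch, assuming SciPy is available for the Gaussian CDF):

import numpy as np
from scipy.stats import norm

x = np.linspace(-3.0, 3.0, 61)
exact = x * norm.cdf(x)  # GELU(x) = x * Phi(x)
fast = 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))
print(np.max(np.abs(exact - fast)))  # tiny: well under 1e-2 everywhere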
Project: variational-text-tensorflow    Author: carpedm20    | project source | file source
def build_encoder(self):
    """Inference Network. q(h|X)"""
    with tf.variable_scope("encoder"):
      self.l1_lin = linear(tf.expand_dims(self.x, 0), self.embed_dim, bias=True, scope="l1")
      self.l1 = tf.nn.relu(self.l1_lin)

      self.l2_lin = linear(self.l1, self.embed_dim, bias=True, scope="l2")
      self.l2 = tf.nn.relu(self.l2_lin)

      self.mu = linear(self.l2, self.h_dim, bias=True, scope="mu")
      self.log_sigma_sq = linear(self.l2, self.h_dim, bias=True, scope="log_sigma_sq")

      self.eps = tf.random_normal((1, self.h_dim), 0, 1, dtype=tf.float32)
      self.sigma = tf.sqrt(tf.exp(self.log_sigma_sq))

      self.h = tf.add(self.mu, tf.mul(self.sigma, self.eps))

      _ = tf.histogram_summary("mu", self.mu)
      _ = tf.histogram_summary("sigma", self.sigma)
      _ = tf.histogram_summary("h", self.h)
      _ = tf.histogram_summary("mu + sigma", self.mu + self.sigma)
Project: ml    Author: hohoins    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
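The 'losses' collection populated here is typically summed into the total training objective elsewhere in the model; a hedged sketch of that pattern (cross_entropy is a hypothetical data-loss tensor):

# hypothetical usage: combine the data loss with the collected decay terms
tf.add_to_collection('losses', cross_entropy)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')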
Project: ml    Author: hohoins    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: Tensormodels    Author: asheshjain399    | project source | file source
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1Regularizer'):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer
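The returned closure can then be attached to any weight tensor; a short usage sketch (W is a hypothetical weight variable):

reg = l1_regularizer(weight=1e-4)
l1_term = reg(W)  # scalar tensor: 1e-4 * sum(|W|)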
Project: Tensormodels    Author: asheshjain399    | project source | file source
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L2Regularizer'):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer
Project: Tensormodels    Author: asheshjain399    | project source | file source
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer
Project: Tensormodels    Author: asheshjain399    | project source | file source
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    the L1 loss op.
  """
  with tf.op_scope([tensor], scope, 'L1Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Tensormodels    Author: asheshjain399    | project source | file source
def l2_loss(tensor, weight=1.0, scope=None, normalize=False):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for op_scope.

  Returns:
    the L2 loss op.
  """
  with tf.op_scope([tensor], scope, 'L2Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    if normalize:
      loss = tf.sqrt(tf.sqrt(tf.nn.l2_loss(tensor)) / tf.to_float(tf.size(tensor)), name='value')
    else:
      loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')

    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Tensormodels    Author: asheshjain399    | project source | file source
def sparse_cross_entropy_loss(logits, labels,
                              weight=1.0, scope=None):
  """Define a Cross Entropy loss using sparse_softmax_cross_entropy_with_logits.

  It can scale the loss by weight factor, and smooth the labels.

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    labels: [batch_size,] target labels.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    A tensor with the softmax_cross_entropy loss.
  """
  with tf.op_scope([logits, labels], scope, 'SparseCrossEntropyLoss'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits,labels,name='xentropy')
    weight = tf.convert_to_tensor(weight,
                                    dtype=logits.dtype.base_dtype,
                                    name='loss_weight')

    loss = tf.mul(weight, tf.reduce_mean(cross_entropy), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: deligan    Author: val-iisc    | project source | file source
def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False, name='MD'):
    num_inputs=df_dim*4
    theta = tf.get_variable(name+"/theta",[num_inputs, num_kernels, dim_per_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name+"/lws",[num_kernels, dim_per_kernel], initializer=tf.constant_initializer(0.0))
    W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
    W = tf.reshape(W,[-1,num_kernels*dim_per_kernel])
    x = input
    x = tf.reshape(x, [batchsize, num_inputs])
    activation = tf.matmul(x, W)
    activation = tf.reshape(activation, [-1, num_kernels, dim_per_kernel])
    abs_dif = tf.mul(
        tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation, 3),
                                    tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0))), 2),
        1 - tf.expand_dims(tf.constant(np.eye(batchsize), dtype=np.float32), 1))
    f = tf.reduce_sum(tf.exp(-abs_dif),2)/tf.reduce_sum(tf.exp(-abs_dif))
    print(f.get_shape())
    print(input.get_shape())
    return tf.concat(1,[x, f])
Project: Face-Pose-Net    Author: fengju514    | project source | file source
def _variable_with_weight_decay(self, name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.                                                                                                               
    Note that the Variable is initialized with a truncated normal distribution.                                                                                                                             
    A weight decay is added only if one is specified.                                                                                                                                                       
    Args:                                                                                                                                                                                                   
    name: name of the variable                                                                                                                                                                              
    shape: list of ints                                                                                                                                                                                     
    stddev: standard deviation of a truncated Gaussian                                                                                                                                                      
    wd: add L2Loss weight decay multiplied by this float. If None, weight                                                                                                                                   
        decay is not added for this Variable.                                                                                                                                                               
    Returns:                                                                                                                                                                                                
    Variable Tensor                                                                                                                                                                                         
    """
    dtype = tf.float32 #if FLAGS.use_fp16 else tf.float32                                                                                                                                                    
    var = self._variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
      weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
      tf.add_to_collection('losses', weight_decay)
    return var
Project: 3D_CNN_jonas    Author: 2015ZxEE    | project source | file source
def variable_with_weight_decay(name, shape, stddev, wd):
    """
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name   -> name of the variable
        shape  -> list of ints
        stddev -> standard deviation of a truncated Gaussian
        wd     -> add L2Loss weight decay multiplied by this float.
                        If None, weight decay is not added for this Variable.
    Rtns:
        var    -> variable tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var   = variable_on_cpu(name,shape,
                    tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var),wd,name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: deep-time-reading    Author: felixduvallet    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: isbi2017-part3    Author: learningtitans    | project source | file source
def apply_image_normalization(image, normalize_per_image=0):
    if normalize_per_image == 0:
      image = tf.sub(image, 0.5)
      image = tf.mul(image, 2.0)  # All pixels now between -1.0 and 1.0
      return image
    elif normalize_per_image == 1:
      image = tf.mul(image, 2.0)  # All pixels now between 0.0 and 2.0
      image = image - tf.reduce_mean(image, axis=[0, 1])
      # Most pixels should be between -1.0 and 1.0
      return image
    elif normalize_per_image == 2:
      image = tf.image.per_image_standardization(image)
      image = tf.mul(image, 0.4)  # This makes 98.8% of pixels between -1.0 and 1.0
      return image
    else:
      raise ValueError('invalid value for normalize_per_image: %d' % normalize_per_image)
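For instance, a decoded image with pixels in [0.0, 1.0] can be remapped to roughly [-1.0, 1.0] with mode 0 (a sketch; raw_image is a hypothetical uint8 image tensor):

image = tf.image.convert_image_dtype(raw_image, tf.float32)      # pixels now in [0.0, 1.0]
image = apply_image_normalization(image, normalize_per_image=0)  # pixels now in [-1.0, 1.0]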
Project: qrn    Author: uwnlp    | project source | file source
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: hyperchamber    Author: 255BITS    | project source | file source
def _create_network(self):
        # Initialize autoencode network weights and biases
        network_weights = self._initialize_weights(**self.network_architecture)

        # Use recognition network to determine mean and 
        # (log) variance of Gaussian distribution in latent
        # space
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"], 
                                      network_weights["biases_recog"])

        # Draw one sample z from Gaussian distribution
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1, 
                               dtype=tf.float32)
        # z = mu + sigma*epsilon
        self.z = tf.add(self.z_mean, 
                        tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        # Use generator to determine mean of
        # Bernoulli distribution of reconstructed input
        self.x_reconstr_mean = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])
Project: dwt    Author: min2209    | project source | file source
def get_conv_filter(self, params):
        if params["name"]+"/weights" in self.modelDict:
            init = tf.constant_initializer(value=self.modelDict[params["name"]+"/weights"], dtype=tf.float32)
            var = tf.get_variable(name="weights", initializer=init, shape=params["shape"])
            print "loaded " + params["name"]+"/weights"
        else:
            if params["std"]:
                stddev = params["std"]
            else:
                fanIn = params["shape"][0]*params["shape"][1]*params["shape"][2]
                stddev = (2/float(fanIn))**0.5

            init = tf.truncated_normal(shape=params["shape"], stddev=stddev, seed=0)
            var = tf.get_variable(name="weights", initializer=init)
            print "generated " + params["name"] + "/weights"

        if not tf.get_variable_scope().reuse:
            weightDecay = tf.mul(tf.nn.l2_loss(var), self._wd,
                                  name='weight_loss')
            tf.add_to_collection('losses', weightDecay)

        return var
Project: dwt    Author: min2209    | project source | file source
def get_conv_filter(self, params):
        if params["name"]+"/weights" in self.modelDict:
            init = tf.constant_initializer(value=self.modelDict[params["name"]+"/weights"], dtype=tf.float32)
            var = tf.get_variable(name="weights", initializer=init, shape=params["shape"])
            print "loaded " + params["name"]+"/weights"
        else:
            if params["std"]:
                stddev = params["std"]
            else:
                fanIn = params["shape"][0]*params["shape"][1]*params["shape"][2]
                stddev = (2/float(fanIn))**0.5

            init = tf.truncated_normal(shape=params["shape"], stddev=stddev, seed=0)
            var = tf.get_variable(name="weights", initializer=init)
            print "generated " + params["name"] + "/weights"

        if not tf.get_variable_scope().reuse:
            weightDecay = tf.mul(tf.nn.l2_loss(var), self._wd,
                                  name='weight_loss')
            tf.add_to_collection('losses', weightDecay)

        return var
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.mul(c, a)
        e = tf.div(d, b)
        f = tf.sub(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val})
Project: facial-emotion-detection-dl    Author: dllatas    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: facial-emotion-detection-dl    Author: dllatas    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: facial-emotion-detection-dl    Author: dllatas    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: probabilistic_line_search    Author: ProbabilisticNumerics    | project source | file source
def _MatMulGradMom(op, W, out_grad, batch_size, mom=2):
  """Computes gradient moment for a weight matrix through a MatMul operation.

  Assumes ``Z=tf.matmul(A, W)``, where ``W`` is a d1xd2 weight matrix, ``A``
  are the nxd1 activations of the previous layer (n being the batch size).
  ``out_grad`` is the gradient w.r.t. ``Z``, as computed by ``tf.gradients()``.
  No transposes in the MatMul operation allowed.

  Inputs:
      :op: The MatMul operation
      :W: The weight matrix (the tensor, not the variable)
      :out_grad: The tensor of gradient w.r.t. to the output of the op
      :batch_size: Batch size n (constant integer or scalar int tf.Tensor)
      :mom: Integer moment desired (defaults to 2)"""

  assert op.type == "MatMul"
  t_a, t_b = op.get_attr("transpose_a"), op.get_attr("transpose_b")
  assert W is op.inputs[1] and not t_a and not t_b

  A = op.inputs[0]
  out_grad_pow = tf.pow(out_grad, mom)
  A_pow = tf.pow(A, mom)
  return tf.mul(batch_size, tf.matmul(A_pow, out_grad_pow, transpose_a=True))
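For mom=2 the result is batch_size * (A**2)^T (G**2): its (i, j) entry is, up to the batch-size factor, the sum over the batch of the squared per-example gradients A[n, i] * G[n, j]. A NumPy check of that identity:

import numpy as np

n, d1, d2 = 4, 3, 2
A = np.random.randn(n, d1)                   # activations of the previous layer
G = np.random.randn(n, d2)                   # gradients w.r.t. Z = A @ W
per_example = np.einsum('ni,nj->nij', A, G)  # per-example gradients of W
direct = (per_example ** 2).sum(axis=0)
via_matmul = (A ** 2).T @ (G ** 2)           # what the op computes (before the batch-size factor)
print(np.allclose(direct, via_matmul))       # True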
Project: web_page_classification    Author: yuhui-lin    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    var = _variable_on_cpu(name,
                           shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: web_page_classification    Author: yuhui-lin    | project source | file source
def _variable_with_weight_decay(self, name, shape, stddev, wd=None):
        """Helper to create an initialized Variable with weight decay.
        Note that the Variable is initialized with a truncated normal distribution.
        A weight decay is added only if one is specified.
        Args:
            name: name of the variable
            shape: list of ints
            stddev: standard deviation of a truncated Gaussian
            wd: add L2Loss weight decay multiplied by this float. If None, weight
                decay is not added for this Variable.
        Returns:
            Variable Tensor
        """
        var = self._variable_on_cpu(
            name,
            shape,
            tf.truncated_normal_initializer(stddev=stddev))
        if wd is not None:
            # weight_decay = tf.mul(tf.constant(0.1), wd, name='weight_loss')
            weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
            tf.add_to_collection('losses', weight_decay)
            # tf.add_to_collection('losses', wd)
        return var
Project: bi-att-flow    Author: allenai    | project source | file source
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: text-classification2    Author: yuhui-lin    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    var = _variable_on_cpu(name,
                           shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: text-classification2    Author: yuhui-lin    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    var = _variable_on_cpu(name,
                           shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: traffic_video_analysis    Author: polltooh    | project source | file source
def _variable_with_weight_decay(name, shape, wd=0.0):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a xavier initialization.
    A weight decay is added only if one is specified.

    #Args:
            name: name of the variable
            shape: list of ints
            wd: add L2Loss weight decay multiplied by this float. If None, weight
                    decay is not added for this Variable.

    Returns:
            Variable Tensor
    """
    var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())
    # print("change var")
    # var = tf.Variable(tf.truncated_normal(shape, mean= 0.0, stddev = 1.0), name = name)
    if wd != 0.0:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: traffic_video_analysis    Author: polltooh    | project source | file source
def _variable_with_weight_decay(name, shape, wd=0.0):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a xavier initialization.
    A weight decay is added only if one is specified.

    #Args:
            name: name of the variable
            shape: list of ints
            wd: add L2Loss weight decay multiplied by this float. If None, weight
                    decay is not added for this Variable.

    Returns:
            Variable Tensor
    """
    var = _variable_on_cpu(name, shape, tf.contrib.layers.xavier_initializer())
    # print("change var")
    # var = tf.Variable(tf.truncated_normal(shape, mean= 0.0, stddev = 1.0), name = name)
    if wd != 0.0:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: traffic_video_analysis    Author: polltooh    | project source | file source
def loss(infer, count_diff_infer, label):
    l2_loss = tf.reduce_mean(tf.reduce_sum(tf.square(infer - label), [1,2,3]), name = 'l2_loss')
    #l2_loss = mf.huber_loss(tf.reduce_sum(infer, [1,2,3]), tf.reduce_sum(label, [1,2,3]), huber_epsilon, 'density_loss')

    huber_epsilon = 5.0
    c_lambda = 0.1
    count_infer = tf.add(tf.squeeze(count_diff_infer), tf.reduce_sum(infer, [1,2,3]), name = "count_infer")
    count_loss = tf.mul(c_lambda, mf.huber_loss(count_infer, tf.reduce_sum(label, [1,2,3]), huber_epsilon, 'huber_loss'),
                name = 'count_loss')
    #count_loss = tf.mul(c_lambda, tf.reduce_mean(tf.square(count_infer - tf.reduce_sum(label, [1,2,3]))),
                    #name = 'count_loss')

    tf.add_to_collection('losses', count_loss)
    tf.add_to_collection('losses', l2_loss)

    return tf.add_n(tf.get_collection('losses'), name = 'total_loss'), count_infer
Project: piecewisecrf    Author: Vaan5    | project source | file source
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1Regularizer'):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer
Project: piecewisecrf    Author: Vaan5    | project source | file source
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L2Regularizer'):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer
Project: piecewisecrf    Author: Vaan5    | project source | file source
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer
Project: piecewisecrf    Author: Vaan5    | project source | file source
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    the L1 loss op.
  """
  with tf.op_scope([tensor], scope, 'L1Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: piecewisecrf    Author: Vaan5    | project source | file source
def l2_loss(tensor, weight=1.0, scope=None):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for op_scope.

  Returns:
    the L2 loss op.
  """
  with tf.op_scope([tensor], scope, 'L2Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Chinese-QA    Author: distantJing    | project source | file source
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: kboc    Author: vmonaco    | project source | file source
def _create_network(self):
        network_weights = self._initialize_weights(**self.network_architecture)

        # Use recognition network to determine mean and
        # (log) variance of Gaussian distribution in latent
        # space
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"],
                                      network_weights["biases_recog"])

        # Draw one sample z from Gaussian distribution
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1,
                               dtype=tf.float32, seed=np.random.randint(0, 1e9))
        # z = mu + sigma*epsilon
        self.z = tf.add(self.z_mean,
                        tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        # Use generator to determine mean of
        # Bernoulli distribution of reconstructed input
        self.x_reconstr_mean = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])
Project: rec-attend-public    Author: renmengye    | project source | file source
def weight_variable(shape,
                    initializer=None,
                    init_val=None,
                    wd=None,
                    name=None,
                    trainable=True):
  """Initialize weights.
  Args:
    shape: shape of the weights, list of int
    wd: weight decay
  """
  log = logger.get()
  if initializer is None:
    initializer = tf.truncated_normal_initializer(stddev=0.01)
  if init_val is None:
    var = tf.Variable(initializer(shape), name=name, trainable=trainable)
  else:
    var = tf.Variable(init_val, name=name, trainable=trainable)
  if wd:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: factorix    Author: gbouchar    | project source | file source
def sparse_hermitian_product(emb, tuples):
    """
    Compute the Hermitian inner product between selected complex embeddings
    This corresponds to the usual dot product applied on the conjugate of the first vector: <conj(x), y>
    where conj is the complex conjugate (obtained by inverting the imaginary part)
    We consider that the embedding dimension is twice the rank, where the first part is in embeddings[:,:rk] and
    the imaginary part is in embeddings[:,rk:].
    It computes
     S[i] = <conj(E[I[i,1]], E[I[i,2]]>
    Usage:
    S = sparse_hermitian_product(E, I):
    :param emb: embedding matrix of size [n_emb, 2 * r] containing float numbers where r is the complex rank
    :param tuples: tuple matrix of size [n_t, 2] containing integers that correspond to the indices of the embeddings
    :return: a pair containing the real and imaginary parts of the Hermitian dot products
    """
    rk = emb.get_shape()[1].value // 2
    emb_re = emb[:, :rk]
    emb_im = emb[:, rk:]
    emb_sel_a_re = tf.gather(emb_re, tuples[:, 0])
    emb_sel_a_im = tf.gather(emb_im, tuples[:, 0])
    emb_sel_b_re = tf.gather(emb_re, tuples[:, 1])
    emb_sel_b_im = tf.gather(emb_im, tuples[:, 1])
    pred_re = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_re) + tf.mul(emb_sel_a_im, emb_sel_b_im), 1)
    pred_im = tf.reduce_sum(tf.mul(emb_sel_a_re, emb_sel_b_im) - tf.mul(emb_sel_a_im, emb_sel_b_re), 1)
    return pred_re, pred_im
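A NumPy check of the real/imaginary decomposition used above, for a single pair of complex vectors stored in the same [real | imaginary] layout:

import numpy as np

rk = 3
a = np.random.randn(2 * rk)
b = np.random.randn(2 * rk)
ref = np.sum(np.conj(a[:rk] + 1j * a[rk:]) * (b[:rk] + 1j * b[rk:]))  # <conj(a), b>
pred_re = np.sum(a[:rk] * b[:rk] + a[rk:] * b[rk:])
pred_im = np.sum(a[:rk] * b[rk:] - a[rk:] * b[:rk])
print(np.allclose([ref.real, ref.imag], [pred_re, pred_im]))  # True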
Project: aaai17-cdq    Author: caoyue10    | project source | file source
def loss_functions(self):
        with tf.device(self.device):
            ### Loss Function
            ### O = L + \lambda (Q^x + Q^y)
            ### L = sum_{ij} (log (1 + exp(alpha * <u_i,v_j>)) - alpha * s_ij * <u_i, v_j>)
            ### Q^x = || u - C * b_x ||
            ### Q^y = || v - C * b_y ||
            ### InnerProduct Value \in [-15, 15]
            InnerProduct = tf.clip_by_value(tf.mul(self.alpha, tf.matmul(self.img_last_layer, tf.transpose(self.txt_last_layer))), -1.5e1, 1.5e1)
            Sim = tf.clip_by_value(tf.matmul(self.img_label, tf.transpose(self.txt_label)), 0.0, 1.0)
            t_ones = tf.ones([tf.shape(self.img_last_layer)[0], tf.shape(self.txt_last_layer)[0]])

            self.cross_entropy_loss = tf.reduce_mean(tf.sub(tf.log(tf.add(t_ones, tf.exp(InnerProduct))), tf.mul(Sim, InnerProduct)))

            self.cq_loss_img = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(self.img_last_layer, tf.matmul(self.b_img, self.C))), 1))
            self.cq_loss_txt = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(self.txt_last_layer, tf.matmul(self.b_txt, self.C))), 1))
            self.q_lambda = tf.Variable(self.cq_lambda, name='lambda')
            self.cq_loss = tf.mul(self.q_lambda, tf.add(self.cq_loss_img, self.cq_loss_txt))
            self.total_loss = tf.add(self.cross_entropy_loss, self.cq_loss)
Project: Face_Point    Author: EllenSimith    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: keras-mdn    Author: yanji84    | project source | file source
def get_mixture_coef(output, KMIX=24, OUTPUTDIM=1):
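  # Note: the three placeholders below are dead code; each is immediately
  # overwritten by the results of tf.split.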
  out_pi = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_sigma = tf.placeholder(dtype=tf.float32, shape=[None,KMIX], name="mixparam")
  out_mu = tf.placeholder(dtype=tf.float32, shape=[None,KMIX*OUTPUTDIM], name="mixparam")
  splits = tf.split(1, 2 + OUTPUTDIM, output)
  out_pi = splits[0]
  out_sigma = splits[1]
  out_mu = tf.pack(splits[2:], axis=2)
  out_mu = tf.transpose(out_mu, [1,0,2])
  # use softmax to normalize pi into prob distribution
  max_pi = tf.reduce_max(out_pi, 1, keep_dims=True)
  out_pi = tf.sub(out_pi, max_pi)
  out_pi = tf.exp(out_pi)
  normalize_pi = tf.inv(tf.reduce_sum(out_pi, 1, keep_dims=True))
  out_pi = tf.mul(normalize_pi, out_pi)
  # use exponential to make sure sigma is positive
  out_sigma = tf.exp(out_sigma)
  return out_pi, out_sigma, out_mu
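Subtracting the row-wise maximum before tf.exp is the standard numerically stable softmax; a NumPy sketch of the same normalization:

import numpy as np

logits = np.array([[1000.0, 1001.0, 1002.0]])        # a naive exp would overflow
shifted = logits - logits.max(axis=1, keepdims=True)
pi = np.exp(shifted)
pi /= pi.sum(axis=1, keepdims=True)                  # valid mixture weights
print(pi)  # [[0.09003057 0.24472847 0.66524096]]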
Project: SLAM    Author: sanjeevkumar42    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: tefla    Author: openAGI    | project source | file source
def leaky_relu(x, alpha=0.01, name='leaky_relu', outputs_collections=None, **unused):
    """
    Computes leaky relu

    Args:
        x: a `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`.
        alpha: the constant for scaling the activation
        name: an optional scope/name of the layer
        outputs_collections: The collections to which the outputs are added.

    Returns:
        A `Tensor` representing the results of the activation operation.
    """
    _check_unused(unused, name)
    with tf.name_scope(name):
        try:
            output = tf.nn.relu(x) + tf.mul(alpha, (x - tf.abs(x))) * 0.5
        except Exception:
            output = tf.nn.relu(x) + tf.multiply(alpha, (x - tf.abs(x))) * 0.5
        return _collect_named_outputs(outputs_collections, name, output)
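The expression relu(x) + alpha * (x - |x|) * 0.5 equals the usual leaky ReLU max(x, alpha * x): for x >= 0 the second term vanishes, and for x < 0 it reduces to alpha * x. A quick NumPy check:

import numpy as np

alpha = 0.01
x = np.array([-2.0, -0.5, 0.0, 1.5])
lrelu = np.maximum(x, 0.0) + alpha * (x - np.abs(x)) * 0.5
print(np.allclose(lrelu, np.maximum(x, alpha * x)))  # True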
Project: deep_separation_contraction    Author: edouardoyallon    | project source | file source
def loss(logits, labels,n_class, scope='loss'):
  with tf.variable_scope(scope):
    # entropy loss
    targets = one_hot_embedding(labels, n_class)
    entropy_loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, targets),
      name='entropy_loss')
    tf.add_to_collection('losses', entropy_loss)
    # weight l2 decay loss
    weight_l2_losses = [tf.nn.l2_loss(o) for o in tf.get_collection('weights')]
    weight_decay_loss = tf.mul(FLAGS.weight_decay, tf.add_n(weight_l2_losses),
      name='weight_decay_loss')
    tf.add_to_collection('losses', weight_decay_loss)
  for var in tf.get_collection('losses'):
    tf.scalar_summary('losses/' + var.op.name, var)
  # total loss
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: deep_separation_contraction    Author: edouardoyallon    | project source | file source
def block(x, n_in, n_out, subsample,  phase_train, scope='res_block'):
  with tf.variable_scope(scope):
    if subsample:
      y = conv2d(x, n_in, n_out, 3, 2, 'SAME', False, phase_train, scope='conv_1')
    else:
      y = conv2d(x, n_in, n_out, 3, 1, 'SAME', False, phase_train, scope='conv_1')

    y = batch_norm(y, n_out, phase_train, scope='bn_1')

    y = tf.mul(tf.sign(y), tf.sqrt(tf.abs(y) + 1e-5) + 0.1)

    y = conv2d(y, n_out, n_out, 3, 1, 'SAME', False, phase_train, scope='conv_2')
    y = batch_norm(y, n_out, phase_train, scope='bn_2')

    y = tf.mul(tf.sign(y), tf.sqrt(tf.abs(y) + 1e-5) + 0.1)
    return y
Project: deep_separation_contraction    Author: edouardoyallon    | project source | file source
def loss(logits, labels,n_class, scope='loss'):
  with tf.variable_scope(scope):
    # entropy loss
    targets = one_hot_embedding(labels, n_class)
    entropy_loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, targets),
      name='entropy_loss')
    tf.add_to_collection('losses', entropy_loss)
    # weight l2 decay loss
    weight_l2_losses = [tf.nn.l2_loss(o) for o in tf.get_collection('weights')]
    weight_decay_loss = tf.mul(FLAGS.weight_decay, tf.add_n(weight_l2_losses),
      name='weight_decay_loss')
    tf.add_to_collection('losses', weight_decay_loss)
  for var in tf.get_collection('losses'):
    tf.scalar_summary('losses/' + var.op.name, var)
  # total loss
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: real_time_face_recognition    Author: shanren7    | project source | file source
def lppool(inpOp, pnorm, kH, kW, dH, dW, padding):
  global pool_counter
  global parameters
  name = 'pool' + str(pool_counter)
  pool_counter += 1

  with tf.name_scope('lppool'):
    if pnorm == 2:
      pwr = tf.square(inpOp)
    else:
      pwr = tf.pow(inpOp, pnorm)

    subsamp = tf.nn.avg_pool(pwr,
                          ksize=[1, kH, kW, 1],
                          strides=[1, dH, dW, 1],
                          padding=padding,
                          name=name)
    subsamp_sum = tf.mul(subsamp, kH*kW)

    if pnorm == 2:
      out = tf.sqrt(subsamp_sum)
    else:
      out = tf.pow(subsamp_sum, 1/pnorm)

  return out
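Multiplying the average pool by kH*kW turns it back into a sum pool, so the op as a whole computes the Lp norm (sum over each window of x^p)^(1/p). A NumPy sketch for a single window:

import numpy as np

window = np.array([1.0, 2.0, 2.0])  # values under one pooling window
p = 2
lp = (np.mean(window ** p) * window.size) ** (1.0 / p)  # avg pool * k == sum pool
print(lp, np.linalg.norm(window, ord=p))                # both print 3.0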