Python tensorflow module: multiply() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.multiply().
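A minimal, self-contained sketch of the basic behaviour (assuming TensorFlow 1.x, which the examples below use): tf.multiply performs element-wise multiplication with broadcasting, in contrast to tf.matmul, which performs matrix multiplication.

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[10.0, 20.0], [30.0, 40.0]])

# Element-wise product; the * operator on tensors is equivalent.
c = tf.multiply(a, b)    # [[10., 40.], [90., 160.]]
d = tf.multiply(a, 2.0)  # broadcasting a scalar: [[2., 4.], [6., 8.]]

with tf.Session() as sess:
    print(sess.run([c, d]))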

Project: youtube-8m | Author: wangheda
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
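A hypothetical usage sketch for the helper above (the shapes and values are illustrative, not taken from the youtube-8m code):

import tensorflow as tf

# A batch of 2 videos with up to 300 frames of 1024-dimensional features.
model_input = tf.random_uniform([2, 300, 1024])
num_frames = tf.constant([[250], [120]], dtype=tf.int32)

# Draws 30 frame indices uniformly at random per video, within each video's length.
sampled = SampleRandomFrames(model_input, num_frames, num_samples=30)
# sampled has shape [2, 30, 1024].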
Project: tensorflow-prebuilt-classifier | Author: recursionbane
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
          (random_brightness != 0))
Project: image_recognition | Author: tue-robotics
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
          (random_brightness != 0))
Project: tensorflow-image-classifier | Author: burliEnterprises
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
          (random_brightness != 0))
Project: squeezeDet-hand | Author: fyhtea
def _variable_with_weight_decay(name, shape, wd, initializer, trainable=True):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_device(name, shape, initializer, trainable)
  if wd is not None and trainable:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
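The helper above accumulates per-variable L2 penalties into the 'losses' collection; a minimal sketch of how such a collection is typically folded into the total loss (the placeholder shapes and names here are illustrative, not taken from the squeezeDet-hand code):

import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])
labels = tf.placeholder(tf.int64, [None])

# Data term of the objective.
cross_entropy_mean = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits),
    name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)

# Total loss = data term + every weight-decay term added via tf.add_to_collection.
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')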
Project: youtube-8m | Author: wangheda
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: youtube-8m | Author: wangheda
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: ml | Author: hohoins
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: ml | Author: hohoins
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: dcan-tensorflow | Author: lisjin
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None and not tf.get_variable_scope().reuse:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Project: dcan-tensorflow | Author: lisjin
def _add_cross_entropy(labels, logits, pref):
    """Compute average cross entropy and add to loss collection.
    Args:
        labels: Single dimension labels from distorted_inputs() or inputs().
        logits: Output map from inference().
        pref: Either 'c' or 's', for contours or segments, respectively.
    """
    with tf.variable_scope('{}_cross_entropy'.format(pref)) as scope:
        class_prop = C_CLASS_PROP if pref == 'c' else S_CLASS_PROP
        weight_per_label = tf.scalar_mul(class_prop, tf.cast(tf.equal(labels, 0),
                                                             tf.float32)) + \
                           tf.scalar_mul(1.0 - class_prop, tf.cast(tf.equal(labels, 1),
                                                                   tf.float32))
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(
            labels=tf.squeeze(labels, squeeze_dims=[3]), logits=logits)
        cross_entropy_weighted = tf.multiply(weight_per_label, cross_entropy)
        cross_entropy_mean = tf.reduce_mean(cross_entropy_weighted, name=scope.name)
        tf.add_to_collection('losses', cross_entropy_mean)
Project: dcan-tensorflow | Author: lisjin
def get_dice_coef(logits, labels):
    """Compute dice coefficient.
    Args:
        logits: Softmax probability applied to fuse layers.
        labels: Correct annotations (0 or 1).
    Returns:
        Mean dice coefficient over full tensor.

    Source:
        https://github.com/zsdonghao/tensorlayer/blob/master/tensorlayer/cost.py#L125
    """
    smooth = 1e-5
    inter = tf.reduce_sum(tf.multiply(logits, labels))
    l = tf.reduce_sum(logits)
    r = tf.reduce_sum(labels)
    return tf.reduce_mean((2.0 * inter + smooth) / (l + r + smooth))
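For reference, the smoothed dice coefficient computed above is 2*|A∩B| / (|A| + |B|) with a small smoothing constant; a NumPy sanity check of the same formula (a sketch, not part of the dcan-tensorflow code):

import numpy as np

def dice_coef_np(logits, labels, smooth=1e-5):
    inter = np.sum(logits * labels)
    return (2.0 * inter + smooth) / (np.sum(logits) + np.sum(labels) + smooth)

print(dice_coef_np(np.array([1.0, 1.0, 0.0]), np.array([1.0, 0.0, 0.0])))  # ~0.6667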
Project: vae-npvc | Author: JeremyCCHsu
def mu_law_encode_nonlinear(audio, quantization_channels=256):
    '''
    Compress the waveform amplitudes using the mu-law non-linearity.
    NOTE: This applies mu-law companding as a non-linear transform only,
          without quantization.
    '''
    with tf.name_scope('encode'):
        mu = tf.to_float(quantization_channels - 1)
        # Perform mu-law companding transformation (ITU-T, 1988).
        # Minimum operation is here to deal with rare large amplitudes caused
        # by resampling.
        safe_audio_abs = tf.minimum(tf.abs(audio), 1.0)
        magnitude = tf.log1p(mu * safe_audio_abs) / tf.log1p(mu)
        signal = tf.multiply(tf.sign(audio), magnitude, name='mulaw')
        # Quantize signal to the specified number of levels.
        # return tf.to_int32((signal + 1) / 2 * mu + 0.5)
        return signal
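The companding step above implements F(x) = sign(x) * ln(1 + mu*|x|) / ln(1 + mu); an equivalent NumPy sketch of the same transform (illustrative only, not part of the vae-npvc code):

import numpy as np

def mu_law_encode_np(audio, quantization_channels=256):
    mu = float(quantization_channels - 1)
    safe_abs = np.minimum(np.abs(audio), 1.0)
    magnitude = np.log1p(mu * safe_abs) / np.log1p(mu)
    return np.sign(audio) * magnitude

print(mu_law_encode_np(np.array([-1.0, -0.1, 0.0, 0.1, 1.0])))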
Project: segmentation_DLMI | Author: imatge-upc
def categorical_crossentropy_3d(y_true, y_predicted):
    """
    Computes the categorical cross-entropy loss for a softmax distribution over a one-hot encoded 3D volume
    with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    y_true : keras.placeholder [batches, dim0,dim1,dim2]
        Placeholder for data holding the ground-truth labels encoded in a one-hot representation
    y_predicted : keras.placeholder [batches,channels,dim0,dim1,dim2]
        Placeholder for data holding the softmax distribution over classes

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """
    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(y_true_flatten)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
Project: text_classification | Author: brightmart
def output_module(self):
        """
        1. Use an attention mechanism between the query and the hidden states to get a weighted sum of the hidden states.
        2. Apply a non-linearity to the query and that weighted sum to get the label.
        input: query_embedding: [batch_size, embed_size]; hidden state: [batch_size, block_size, hidden_size] of memory
        :return: y: predicted label, [batch_size, vocab_size]
        """
        # 1.use attention mechanism between query and hidden states, to get weighted sum of hidden state.
        # 1.1 get probability distribution (of similarity)
        p=tf.nn.softmax(tf.multiply(tf.expand_dims(self.query_embedding,axis=1),self.hidden_state)) #shape:[batch_size,block_size,hidden_size]<---query_embedding_expand:[batch_size,1,hidden_size]; hidden_state:[batch_size,block_size,hidden_size]
        # 1.2 get weighted sum of hidden state
        u=tf.reduce_sum(tf.multiply(p,self.hidden_state),axis=1) #shape:[batch_size,hidden_size]<----------([batch_size,block_size,hidden_size],[batch_size,block_size,hidden_size])

        # 2.non-linearity of query and hidden state to get label
        H_u_matmul=tf.matmul(u,self.H)+self.h_u_bias #shape:[batch_size,hidden_size]<----([batch_size,hidden_size],[hidden_size,hidden_size])
        activation=self.activation(self.query_embedding + H_u_matmul,scope="query_add_hidden")           #shape:[batch_size,hidden_size]
        activation = tf.nn.dropout(activation,keep_prob=self.dropout_keep_prob) #shape:[batch_size,hidden_size]
        y=tf.matmul(activation,self.R)+self.y_bias #shape:[batch_size,vocab_size]<-----([batch_size,hidden_size],[hidden_size,vocab_size])
        return y #shape:[batch_size,vocab_size]
Project: text_classification | Author: brightmart
def inference(self):
        """ building blocks:
        encoder:6 layers.each layers has two   sub-layers. the first is multi-head self-attention mechanism; the second is position-wise fully connected feed-forward network.
               for each sublayer. use LayerNorm(x+Sublayer(x)). all dimension=512.
        decoder:6 layers.each layers has three sub-layers. the second layer is performs multi-head attention over the ouput of the encoder stack.
               for each sublayer. use LayerNorm(x+Sublayer(x)).
        """
        # 1.embedding for encoder input & decoder input
        # 1.1 position embedding for encoder input
        input_x_embeded = tf.nn.embedding_lookup(self.Embedding,self.input_x)  #[None,sequence_length, embed_size]
        input_x_embeded=tf.multiply(input_x_embeded,tf.sqrt(tf.cast(self.d_model,dtype=tf.float32)))
        input_mask=tf.get_variable("input_mask",[self.sequence_length,1],initializer=self.initializer)
        input_x_embeded=tf.add(input_x_embeded,input_mask) #[None,sequence_length,embed_size].position embedding.

        # 2. encoder
        encoder_class=Encoder(self.d_model,self.d_k,self.d_v,self.sequence_length,self.h,self.batch_size,self.num_layer,input_x_embeded,input_x_embeded,dropout_keep_prob=self.dropout_keep_prob,use_residual_conn=self.use_residual_conn)
        Q_encoded,K_encoded = encoder_class.encoder_fn() #K_v_encoder

        Q_encoded=tf.reshape(Q_encoded,shape=(self.batch_size,-1)) #[batch_size,sequence_length*d_model]
        with tf.variable_scope("output"):
            logits = tf.matmul(Q_encoded, self.W_projection) + self.b_projection #logits shape:[batch_size*decoder_sent_length,self.num_classes]
        print("logits:",logits)
        return logits
Project: deeppavlov | Author: deepmipt
def masked_softmax(tensor, mask, expand=2, axis=1):
    """Masked soft-max using Lambda and merge-multiplication.

    Args:
        tensor: tensor containing scores
        mask: mask for the tensor, where 1 marks a valid value at that position and 0 marks void/padded positions
        expand: axis along which to repeat mask
        axis: axis along which to compute soft-max

    Returns:
        masked soft-max values
    """

    mask = tf.expand_dims(mask, axis=expand)
    exponentiate = Lambda(lambda x: K.exp(x - K.max(x, axis=axis, keepdims=True)))(tensor)
    masked = tf.multiply(exponentiate, mask)
    div = tf.expand_dims(tf.reduce_sum(masked, axis=axis), axis=axis)
    predicted = tf.divide(masked, div)
    return predicted
Project: deeppavlov | Author: deepmipt
def answer_start_pred(context_encoding, question_attention_vector, context_mask, W, dropout_rate):
    """Answer start prediction layer."""

    answer_start = Lambda(lambda arg:
                          concatenate([arg[0], arg[1], arg[2]]))([
        context_encoding,
        question_attention_vector,
        multiply([context_encoding, question_attention_vector])])

    answer_start = TimeDistributed(Dense(W, activation='relu'))(answer_start)
    answer_start = Dropout(rate=dropout_rate)(answer_start)
    answer_start = TimeDistributed(Dense(1))(answer_start)

    # apply masking
    answer_start = Lambda(lambda q: masked_softmax(q[0], q[1]))([answer_start, context_mask])
    answer_start = Lambda(lambda q: flatten(q))(answer_start)
    return answer_start
Project: acdc_segmenter | Author: baumgach
def pixel_wise_cross_entropy_loss_weighted(logits, labels, class_weights):
    '''
    Weighted cross entropy loss, with a weight per class
    :param logits: Network output before softmax
    :param labels: Ground truth masks
    :param class_weights: A list of the weights for each class
    :return: weighted cross entropy loss
    '''

    n_class = len(class_weights)

    flat_logits = tf.reshape(logits, [-1, n_class])
    flat_labels = tf.reshape(labels, [-1, n_class])

    class_weights = tf.constant(np.array(class_weights, dtype=np.float32))

    weight_map = tf.multiply(flat_labels, class_weights)
    weight_map = tf.reduce_sum(weight_map, axis=1)

    loss_map = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits, labels=flat_labels)
    weighted_loss = tf.multiply(loss_map, weight_map)

    loss = tf.reduce_mean(weighted_loss)

    return loss
Project: paraphrase-id-tensorflow | Author: nelson-liu
def mask_similarity_matrix(similarity_matrix, mask_a, mask_b):
    """
    Given the mask of the two sentences, apply the mask to the similarity
    matrix.

    Parameters
    ----------
    similarity_matrix: Tensor
        Tensor of shape (batch_size, num_sentence_words, num_sentence_words).

    mask_a: Tensor
        Tensor of shape (batch_size, num_sentence_words). This mask should
        correspond to the first vector (v1) used to calculate the similarity
        matrix.

    mask_b: Tensor
        Tensor of shape (batch_size, num_sentence_words). This mask should
        correspond to the second vector (v2) used to calculate the similarity
        matrix.
    """
    similarity_matrix = tf.multiply(similarity_matrix,
                                    tf.expand_dims(tf.cast(mask_a, "float"), 1))
    similarity_matrix = tf.multiply(similarity_matrix,
                                    tf.expand_dims(tf.cast(mask_b, "float"), 2))
    return similarity_matrix
Project: yt8m | Author: forwchen
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: tf_classification | Author: visipedia
def create_training_batch(serialized_example, cfg, add_summaries):

    features = get_region_data(serialized_example, cfg, fetch_ids=False,
                               fetch_labels=True, fetch_text_labels=False)

    original_image = features['image']
    bboxes = features['bboxes']
    labels = features['labels']

    distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)

    distorted_inputs = tf.subtract(distorted_inputs, 0.5)
    distorted_inputs = tf.multiply(distorted_inputs, 2.0)

    names = ('inputs', 'labels')
    tensors = [distorted_inputs, labels]
    return [names, tensors]
Project: tf_classification | Author: visipedia
def create_classification_batch(serialized_example, cfg, add_summaries):

    features = get_region_data(serialized_example, cfg, fetch_ids=True,
                               fetch_labels=False, fetch_text_labels=False)

    original_image = features['image']
    bboxes = features['bboxes']
    ids = features['ids']

    distorted_inputs = get_distorted_inputs(original_image, bboxes, cfg, add_summaries)

    distorted_inputs = tf.subtract(distorted_inputs, 0.5)
    distorted_inputs = tf.multiply(distorted_inputs, 2.0)

    names = ('inputs', 'ids')
    tensors = [distorted_inputs, ids]
    return [names, tensors]
Project: fold | Author: tensorflow
def test_two_ops_network(self):
    shape = loom.TypeShape('int64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    the_loom = loom.Loom(named_ops=ops)
    output_tensor = the_loom.output_tensor(shape)
    with self.test_session():
      weaver = the_loom.make_weaver()
      c1 = weaver(np.array([1, 2, 3], dtype='int64'))
      c2 = weaver(np.array([2, 4, 6], dtype='int64'))
      c3 = weaver(np.array([3, 6, 9], dtype='int64'))
      sum_2_3 = weaver.add(c2, c3)
      sum_12_13 = weaver.mul(c1, sum_2_3)
      result = output_tensor.eval(
          feed_dict=weaver.build_feed_dict([sum_12_13]))
    self.assertTrue((result == np.array([[5, 20, 45]], dtype='int64')).all())
Project: fold | Author: tensorflow
def test_two_ops_network_tagged_named_tensorx(self):
    shape = loom.TypeShape('int64', (3,), tag='x')
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    named_tensors = {
        'c1': (tf.constant(np.array([1, 2, 3], dtype='int64')), 'x'),
        'c2': (tf.constant(np.array([2, 4, 6], dtype='int64')), 'x'),
        'c3': (tf.constant(np.array([3, 6, 9], dtype='int64')), 'x')
    }
    the_loom = loom.Loom(named_ops=ops, named_tensors=named_tensors)
    output_tensor = the_loom.output_tensor(shape)
    with self.test_session():
      weaver = the_loom.make_weaver()
      sum_2_3 = weaver.add(weaver.c2, weaver.c3)
      sum_12_13 = weaver.mul(weaver.c1, sum_2_3)
      result = output_tensor.eval(
          feed_dict=weaver.build_feed_dict([sum_12_13]))
    self.assertTrue((result == np.array([[5, 20, 45]], dtype='int64')).all())
Project: fold | Author: tensorflow
def test_gradient(self):
    x_var = tf.Variable(tf.zeros([3], dtype='float64'), name='x')
    shape = loom.TypeShape('float64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    the_loom = loom.Loom(named_tensors={'x': x_var}, named_ops=ops)

    output_tensor = the_loom.output_tensor(shape)
    output = tf.reduce_sum(output_tensor)
    gradient = tf.gradients(output, [x_var])[0]
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())

      weaver = the_loom.make_weaver()
      m = weaver(np.array([1, 2, 3], dtype='float64'))
      b = weaver(np.array([47, 9, -1], dtype='float64'))
      mx = weaver.mul(m, weaver.x)
      mx_plus_b = weaver.add(mx, b)
      result = gradient.eval(feed_dict=weaver.build_feed_dict([mx_plus_b]))
    self.assertTrue((result == np.array(
        [1.0, 2.0, 3.0], dtype='float64')).all())
Project: fold | Author: tensorflow
def test_gradient_with_direct_feed_dict(self):
    x_var = tf.Variable(tf.zeros([3], dtype='float64'), name='x')
    shape = loom.TypeShape('float64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    the_loom = loom.Loom(named_tensors={'x': x_var}, named_ops=ops,
                         direct_feed_dict=True)

    output_tensor = the_loom.output_tensor(shape)
    output = tf.reduce_sum(output_tensor)
    gradient = tf.gradients(output, [x_var])[0]
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())

      weaver = the_loom.make_weaver()
      m = weaver(np.array([1, 2, 3], dtype='float64'))
      b = weaver(np.array([47, 9, -1], dtype='float64'))
      mx = weaver.mul(m, weaver.x)
      mx_plus_b = weaver.add(mx, b)
      result = gradient.eval(feed_dict=weaver.build_feed_dict([mx_plus_b]))
    self.assertTrue((result == np.array(
        [1.0, 2.0, 3.0], dtype='float64')).all())
Project: tf-cnn-lstm-ocr-captcha | Author: Luonic
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: baselines | Author: openai
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        assert (len(U.scope_name().split('/')) == 2)

        w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
        b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
        weight_decay_fc = 3e-4

        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            if weight_loss_dict is not None:
                weight_loss_dict[w] = weight_decay_fc
                weight_loss_dict[b] = 0.0

            tf.add_to_collection(U.scope_name().split('/')[0] + '_' + 'losses', weight_decay)

        return tf.nn.bias_add(tf.matmul(x, w), b)
Project: youtube-8m | Author: google
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: Video-Classification | Author: boyaolin
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: rl_algorithms | Author: DanielTakeshi
def _build_net(self, input_BO, scope):
        """ The Actor network.

        Uses ReLUs for all hidden layers, and a tanh at the output to bound the
        action. This follows their 'low-dimensional networks' using 400 and 300
        units for the hidden layers. Set `reuse=False`. I don't use batch
        normalization or their precise weight initialization.
        """
        with tf.variable_scope(scope, reuse=False):
            hidden1 = layers.fully_connected(input_BO,
                    num_outputs=400,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.relu)
            hidden2 = layers.fully_connected(hidden1, 
                    num_outputs=300,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.relu)
            actions_BA = layers.fully_connected(hidden2,
                    num_outputs=self.ac_dim,
                    weights_initializer=layers.xavier_initializer(),
                    activation_fn=tf.nn.tanh) # Note the tanh!
            # This should broadcast, but haven't tested with ac_dim > 1.
            actions_BA = tf.multiply(actions_BA, self.ac_high)
            return actions_BA
Project: dynamic-training-bench | Author: galeone
def yuv2rgb(yuv):
    """
    Convert YUV image into RGB https://en.wikipedia.org/wiki/YUV
    """
    yuv = tf.multiply(yuv, 255)
    yuv2rgb_filter = tf.constant([[[[1., 1., 1.], [0., -0.34413999, 1.77199996],
                                    [1.40199995, -0.71414, 0.]]]])
    yuv2rgb_bias = tf.constant([-179.45599365, 135.45983887, -226.81599426])

    yuv = tf.expand_dims(yuv, 0)
    temp = tf.nn.conv2d(yuv, yuv2rgb_filter, [1, 1, 1, 1], 'SAME')
    temp = tf.nn.bias_add(temp, yuv2rgb_bias)
    temp = tf.maximum(temp, tf.zeros(temp.get_shape(), dtype=tf.float32))
    temp = tf.minimum(temp,
                      tf.multiply(
                          tf.ones(temp.get_shape(), dtype=tf.float32), 255))
    temp = tf.divide(temp, 255)
    temp = tf.squeeze(temp, [0])
    return temp
Project: cap2vid | Author: Singularity42
def align(hid_align, h_dec, scope):
    h_dec_align = linear3(h_dec, dim_align, "h_dec_align_"+scope) #batch_size x dimAlign
    h_dec_align = tf.reshape(h_dec_align,[batch_size,1,dim_align])
    h_dec_align_tiled = tf.tile(h_dec_align, [1, sentence_length, 1])
    all_align = tf.tanh(h_dec_align + hid_align) 

    with tf.variable_scope("v_align_"+scope, reuse = DO_SHARE):
        v_align=tf.get_variable("v_align_"+scope, [dim_align], initializer=tf.constant_initializer(0.0))
    e_t = all_align * v_align
    e_t = tf.reduce_sum(e_t, 2)

    # normalise
    alpha = tf.nn.softmax(e_t) # batch_size x sentence_length
    alpha_t = tf.reshape(alpha, [batch_size, sentence_length, 1])
    alpha_tile = tf.tile(alpha_t, [1, 1, 2*y_enc_size])
    s_t = tf.multiply(alpha_tile, h_t_lang)

    s_t = tf.reduce_sum(s_t, 1)

    return s_t,alpha
Project: PixelDCN | Author: HongyangGao
def cal_loss(self):
        one_hot_labels = tf.one_hot(
            self.labels, depth=self.conf.class_num,
            axis=self.channel_axis, name='labels/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_labels, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_preds = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.labels, self.decoded_preds,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        # weights = tf.cast(
        #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
        #     tf.int32, name='m_iou/weights')
        weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
            tf.int64, name='m_iou/weights')
        labels = tf.multiply(self.labels, weights, name='m_iou/mul')
        self.m_iou, self.miou_op = tf.metrics.mean_iou(
            self.labels, self.decoded_preds, self.conf.class_num,
            weights, name='m_iou/m_ious')
Project: DeepFM | Author: dwt0317
def build_model(user_indices, item_indices, rank, ratings, user_cnt, item_cnt, lr, lamb, mu, init_value):


    W_user = tf.Variable(tf.truncated_normal([user_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'user_embedding', dtype=tf.float32)
    W_item = tf.Variable(tf.truncated_normal([item_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'item_embedding', dtype=tf.float32)

    W_user_bias = tf.concat([W_user, tf.ones((user_cnt,1), dtype=tf.float32)], 1, name='user_embedding_bias')
    W_item_bias = tf.concat([tf.ones((item_cnt,1), dtype=tf.float32), W_item], 1, name='item_embedding_bias')

    user_feature = tf.nn.embedding_lookup(W_user_bias, user_indices, name = 'user_feature')
    item_feature = tf.nn.embedding_lookup(W_item_bias, item_indices, name = 'item_feature') 


    preds = tf.add(tf.reduce_sum( tf.multiply(user_feature , item_feature) , 1), mu)

    square_error = tf.sqrt(tf.reduce_mean( tf.squared_difference(preds, ratings)))
    loss = square_error + lamb*(tf.reduce_mean(tf.nn.l2_loss(W_user)) + tf.reduce_mean(tf.nn.l2_loss(W_item)))

    tf.summary.scalar('square_error', square_error)
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    #tf.global_variables_initializer()
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)   # tf.train.AdadeltaOptimizer(learning_rate=lr).minimize(loss)    #

    return train_step, square_error, loss, merged_summary
Project: 3D_Dense_Transformer_Networks | Author: JohnYC1995
def _meshgrid(self):
        with tf.variable_scope('_meshgrid'):
            x_t = tf.matmul(tf.ones(shape=tf.stack([self.out_height, 1])),
                            tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, self.out_width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, self.out_height), 1),
                            tf.ones(shape=tf.stack([1, self.out_width])))
            x_t_flat = tf.reshape(x_t, (1, -1))
            y_t_flat = tf.reshape(y_t, (1, -1))
            px,py = tf.stack([x_t_flat],axis=2),tf.stack([y_t_flat],axis=2)
            #source control points
            x,y = tf.linspace(-1.,1.,self.Column_controlP_number),tf.linspace(-1.,1.,self.Row_controlP_number)
            x,y = tf.meshgrid(x,y)
            xs,ys = tf.transpose(tf.reshape(x,(-1,1))),tf.transpose(tf.reshape(y,(-1,1)))
            cpx,cpy = tf.transpose(tf.stack([xs],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([ys],axis=2),perm=[1,0,2])
            px, cpx = tf.meshgrid(px, cpx)
            py, cpy = tf.meshgrid(py, cpy)
            #Compute distance R
            Rx,Ry = tf.square(tf.subtract(px,cpx)),tf.square(tf.subtract(py,cpy))
            R = tf.add(Rx,Ry)          
            R = tf.multiply(R,tf.log(tf.clip_by_value(R,1e-10,1e+10)))
            #Source coordinates
            ones = tf.ones_like(x_t_flat) 
            grid = tf.concat([ones, x_t_flat, y_t_flat,R],0)
            grid = tf.reshape(grid,[-1])
            grid = tf.reshape(grid,[self.Column_controlP_number*self.Row_controlP_number+3,self.out_height*self.out_width])
            return grid
Project: deep_learning_study | Author: jowettcz
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Project: tflearn | Author: tflearn
def L2(tensor, wd=0.001):
    """ L2.

    Computes half the L2 norm of a tensor without the `sqrt`:

      output = sum(t ** 2) / 2 * wd

    Arguments:
        tensor: `Tensor`. The tensor to apply regularization.
        wd: `float`. The decay.

    Returns:
        The regularization `Tensor`.

    """
    return tf.multiply(tf.nn.l2_loss(tensor), wd, name='L2-Loss')
Project: tflearn | Author: tflearn
def L1(tensor, wd=0.001):
    """ L1.

    Computes the L1 norm of a tensor:

      output = sum(|t|) * wd

    Arguments:
        tensor: `Tensor`. The tensor to apply regularization.
        wd: `float`. The decay.

    Returns:
        The regularization `Tensor`.

    """
    return tf.multiply(tf.reduce_sum(tf.abs(tensor)), wd, name='L1-Loss')
Project: dlbench | Author: hclhkbu
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
    average_grads = []
    for single_grads in zip(*tower_grads):
        grads = [g for g, _ in single_grads]
        grad = tf.add_n(grads)
        grad = tf.multiply(grad, 1.0/len(grads))
        v = single_grads[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
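A toy illustration of the tower_grads structure this function expects, with constant gradients for a single shared variable (the values are illustrative only):

import tensorflow as tf

v = tf.Variable(1.0)
tower_grads = [
    [(tf.constant(2.0), v)],  # gradients computed on tower 0
    [(tf.constant(4.0), v)],  # gradients computed on tower 1
]
avg = average_gradients(tower_grads)
# avg is [(grad, v)] where grad evaluates to (2.0 + 4.0) / 2 = 3.0.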
Project: dlbench | Author: hclhkbu
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
    average_grads = []
    for single_grads in zip(*tower_grads):
        grads = [g for g, _ in single_grads]
        grad = tf.add_n(grads)
        grad = tf.multiply(grad, 1.0/len(grads))
        v = single_grads[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
Project: dlbench | Author: hclhkbu
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
       List of pairs of (gradient, variable) where the gradient has been averaged
       across all towers.
    """
    average_grads = []
    for single_grads in zip(*tower_grads):
        grads = [g for g, _ in single_grads]
        grad = tf.add_n(grads)
        grad = tf.multiply(grad, 1.0/len(grads))
        v = single_grads[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
Project: Youtube-8M-WILLOW | Author: antoine77340
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
Project: tensorflow_face | Author: ZhihengCV
def image_preprocessing(image, train):
    """Decode and preprocess one image for evaluation or training.

    Args:
      image: JPEG
      train: boolean
    Returns:
      3-D float Tensor containing an appropriately scaled image

    Raises:
       ValueError: if user does not provide bounding box
    """
    with tf.name_scope('image_preprocessing'):
        if train:
            image = tf.image.random_flip_left_right(image)
            image = tf.image.random_brightness(image, 0.6)
            if FLAGS.image_channel >= 3:
                image = tf.image.random_saturation(image, 0.6, 1.4)
        # Finally, rescale to [-1,1] instead of [0, 1)
        image = tf.subtract(image, 0.5)
        image = tf.multiply(image, 2.0)
        image = tf.image.per_image_standardization(image)
        return image
Project: transform | Author: tensorflow
def testComposedMaps(self):
    def preprocessing_fn(inputs):
      return {
          'a(b+c)': tf.multiply(
              inputs['a'], tf.add(inputs['b'], inputs['c']))
      }

    input_data = [{'a': 4, 'b': 3, 'c': 3}, {'a': 1, 'b': 2, 'c': 1}]
    input_metadata = dataset_metadata.DatasetMetadata({
        'a': sch.ColumnSchema(tf.float32, [], sch.FixedColumnRepresentation()),
        'b': sch.ColumnSchema(tf.float32, [], sch.FixedColumnRepresentation()),
        'c': sch.ColumnSchema(tf.float32, [], sch.FixedColumnRepresentation())
    })
    expected_data = [{'a(b+c)': 24}, {'a(b+c)': 3}]
    expected_metadata = dataset_metadata.DatasetMetadata({
        'a(b+c)': sch.ColumnSchema(
            tf.float32, [], sch.FixedColumnRepresentation())
    })
    self.assertAnalyzeAndTransformResults(
        input_data, input_metadata, preprocessing_fn, expected_data,
        expected_metadata)
Project: powerai-transfer-learning | Author: IBM
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
          (random_brightness != 0))
Project: X-ray-classification | Author: bendidi
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
  """Prepare one image for evaluation.
  If height and width are specified it would output an image with that size by
  applying resize_bilinear.
  If central_fraction is specified it would crop the central fraction of the
  input image.
  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1]; otherwise it would be converted to tf.float32, assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for the
      int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    if central_fraction:
      image = tf.image.central_crop(image, central_fraction=central_fraction)

    if height and width:
      # Resize the image to the specified height and width.
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width],
                                       align_corners=False)
      image = tf.squeeze(image, [0])
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
Project: A3C | Author: go2sea
def noisy_dense(inputs, units, bias_shape, c_names, w_i, b_i=None, activation=tf.nn.relu, noisy_distribution='factorised'):
    def f(e_list):
        return tf.multiply(tf.sign(e_list), tf.pow(tf.abs(e_list), 0.5))
    # The flatten + dense could also be written with tf.layers, e.g.:
    # dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50)
    if not isinstance(inputs, ops.Tensor):
        inputs = ops.convert_to_tensor(inputs, dtype='float')
        # dim_list = inputs.get_shape().as_list()
        # flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:])
        # reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape])
    if len(inputs.shape) > 2:
        inputs = tf.contrib.layers.flatten(inputs)
    flatten_shape = inputs.shape[1]
    weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
    w_noise = tf.get_variable('w_noise', [flatten_shape, units], initializer=w_i, collections=c_names)
    if noisy_distribution == 'independent':
        weights += tf.multiply(tf.random_normal(shape=w_noise.shape), w_noise)
    elif noisy_distribution == 'factorised':
        noise_1 = f(tf.random_normal(tf.TensorShape([flatten_shape, 1]), dtype=tf.float32))  # factorised Gaussian noise: outer product of two noise vectors
        noise_2 = f(tf.random_normal(tf.TensorShape([1, units]), dtype=tf.float32))
        weights += tf.multiply(noise_1 * noise_2, w_noise)
    dense = tf.matmul(inputs, weights)
    if bias_shape is not None:
        assert bias_shape[0] == units
        biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
        b_noise = tf.get_variable('b_noise', [1, units], initializer=b_i, collections=c_names)
        if noisy_distribution == 'independent':
            biases += tf.multiply(tf.random_normal(shape=b_noise.shape), b_noise)
        elif noisy_distribution == 'factorised':
            biases += tf.multiply(noise_2, b_noise)
        return activation(dense + biases) if activation is not None else dense + biases
    return activation(dense) if activation is not None else dense


Project: youtube-8m | Author: wangheda
def SampleRandomSequence(model_input, num_frames, num_samples):
  """Samples a random sequence of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """

  batch_size = tf.shape(model_input)[0]
  frame_index_offset = tf.tile(
      tf.expand_dims(tf.range(num_samples), 0), [batch_size, 1])
  max_start_frame_index = tf.maximum(num_frames - num_samples, 0)
  start_frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, 1]),
          tf.cast(max_start_frame_index + 1, tf.float32)), tf.int32)
  frame_index = tf.minimum(start_frame_index + frame_index_offset,
                           tf.cast(num_frames - 1, tf.int32))
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)