Python tensorflow module: add() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.add().
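
For orientation, here is a minimal sketch of tf.add itself (assuming TensorFlow 1.x graph mode; values are illustrative):

import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = tf.add(a, b)  # element-wise addition, equivalent to a + b, with an optional name argument

with tf.Session() as sess:
    print(sess.run(c))  # [4. 6.]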

Project: identifiera-sarkasm    Author: risnejunior    | project source | file source
def feed_network(self,data,keep_prob,chunk_size,n_chunks,dynamic):
        # This code is copied from tflearn
        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        batch_size = tf.shape(data)[0]
        weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
        rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._gru_cell,output_keep_prob=keep_prob)

        # Calculation Begin
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data, axis)
        sequence = tf.unstack(data)
        outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length = sequence_lengths)
        if dynamic:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            output = net.advanced_indexing_op(outputs, sequence_lengths)
        else:
            output = outputs[-1]
        output = tf.add(tf.matmul(output,weight_dropout), self._layer_biases)
        return output
Project: facerecognition    Author: guoxiaolu    | project source | file source
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: the margin enforced between positive and negative distances.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
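
A hypothetical usage sketch (the embedding size 128 and margin 0.2 are illustrative; the triplet_loss function above is assumed to be in scope):

import tensorflow as tf

anchor = tf.placeholder(tf.float32, [None, 128])
positive = tf.placeholder(tf.float32, [None, 128])
negative = tf.placeholder(tf.float32, [None, 128])
loss = triplet_loss(anchor, positive, negative, alpha=0.2)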
Project: variational-text-tensorflow    Author: carpedm20    | project source | file source
def build_encoder(self):
    """Inference Network. q(h|X)"""
    with tf.variable_scope("encoder"):
      self.l1_lin = linear(tf.expand_dims(self.x, 0), self.embed_dim, bias=True, scope="l1")
      self.l1 = tf.nn.relu(self.l1_lin)

      self.l2_lin = linear(self.l1, self.embed_dim, bias=True, scope="l2")
      self.l2 = tf.nn.relu(self.l2_lin)

      self.mu = linear(self.l2, self.h_dim, bias=True, scope="mu")
      self.log_sigma_sq = linear(self.l2, self.h_dim, bias=True, scope="log_sigma_sq")

      self.eps = tf.random_normal((1, self.h_dim), 0, 1, dtype=tf.float32)
      self.sigma = tf.sqrt(tf.exp(self.log_sigma_sq))

      self.h = tf.add(self.mu, tf.multiply(self.sigma, self.eps))  # tf.mul was renamed tf.multiply in TF 1.0

      _ = tf.summary.histogram("mu", self.mu)  # tf.histogram_summary pre-TF 1.0
      _ = tf.summary.histogram("sigma", self.sigma)
      _ = tf.summary.histogram("h", self.h)
      _ = tf.summary.histogram("mu + sigma", self.mu + self.sigma)
Project: ml    Author: hohoins    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
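
A minimal standalone sketch of the weight-decay-via-collection pattern used above (assuming TensorFlow 1.x; shapes and constants are illustrative):

import tensorflow as tf

var = tf.get_variable('w', [256, 10],
                      initializer=tf.truncated_normal_initializer(stddev=0.04))
weight_decay = tf.multiply(tf.nn.l2_loss(var), 0.004, name='weight_loss')
tf.add_to_collection('losses', weight_decay)

# the data loss and all decay terms are later combined, e.g. with tf.add_n:
data_loss = tf.constant(1.0)  # stand-in for a real cross-entropy loss
total_loss = tf.add_n(tf.get_collection('losses') + [data_loss])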
Project: a-nice-mc    Author: ermongroup    | project source | file source
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
    def leapfrog(pos, vel, step, i):
        de_dp_ = tf.gradients(tf.reduce_sum(energy_fn(pos)), pos)[0]
        new_vel_ = vel - step * de_dp_
        new_pos_ = pos + step * new_vel_
        return [new_pos_, new_vel_, step, tf.add(i, 1)]

    def condition(pos, vel, step, i):
        return tf.less(i, n_steps)

    de_dp = tf.gradients(tf.reduce_sum(energy_fn(initial_pos)), initial_pos)[0]
    vel_half_step = initial_vel - 0.5 * stepsize * de_dp
    pos_full_step = initial_pos + stepsize * vel_half_step

    i = tf.constant(0)
    final_pos, new_vel, _, _ = tf.while_loop(condition, leapfrog, [pos_full_step, vel_half_step, stepsize, i])
    de_dp = tf.gradients(tf.reduce_sum(energy_fn(final_pos)), final_pos)[0]
    final_vel = new_vel - 0.5 * stepsize * de_dp
    return final_pos, final_vel
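
A hypothetical call with a standard Gaussian energy E(x) = 0.5 * sum(x^2), whose gradient is x (the batch size, dimensionality, and step settings are illustrative; simulate_dynamics above is assumed in scope):

import tensorflow as tf

def gaussian_energy(x):
    return 0.5 * tf.reduce_sum(tf.square(x), axis=1)

pos0 = tf.random_normal([32, 2])
vel0 = tf.random_normal([32, 2])
final_pos, final_vel = simulate_dynamics(pos0, vel0, stepsize=0.1,
                                         n_steps=10, energy_fn=gaussian_energy)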
Project: deep_architect    Author: negrinho    | project source | file source
def compile(self, in_x, train_feed, eval_feed):
        n = np.product(self.in_d)
        m, param_init_fn = [dom[i] for (dom, i) in zip(self.domains, self.chosen)]

        #sc = np.sqrt(6.0) / np.sqrt(m + n)
        #W = tf.Variable(tf.random_uniform([n, m], -sc, sc))
        W = tf.Variable( param_init_fn( [n, m] ) )
        b = tf.Variable(tf.zeros([m]))

        # if the number of input dimensions is larger than one, flatten the 
        # input and apply the affine transformation. 
        if len(self.in_d) > 1:
            in_x_flat = tf.reshape(in_x, shape=[-1, n])
            out_y = tf.add(tf.matmul(in_x_flat, W), b)
        else:
            out_y = tf.add(tf.matmul(in_x, W), b)
        return out_y

# computes the output dimension based on the padding scheme used.
# this comes from the tensorflow documentation
Project: deep_architect    Author: negrinho    | project source | file source
def compile(self, in_x, train_feed, eval_feed):
        in_height, in_width, in_nchannels = self.in_d 
        nfilters, filter_len, stride, padding, param_init_fn = [dom[i] 
                for (dom, i) in zip(self.domains, self.chosen)]

        # Creation and initialization of the parameters. Should take size of 
        # the filter into account.
        W = tf.Variable(
                param_init_fn( [filter_len, filter_len, in_nchannels, nfilters]) )
        b = tf.Variable(tf.zeros([nfilters]))

        # create the output and add the bias.
        out_yaux = tf.nn.conv2d(in_x, W, strides=[1, stride, stride, 1], padding=padding)
        out_y = tf.nn.bias_add(out_yaux, b)

        #print(in_x.get_shape(), self.get_outdim(), out_y.get_shape())

        return out_y
Project: DeepWorks    Author: daigo0927    | project source | file source
def _shortcut(inputs, x): # x = f(inputs)
    # shortcut path
    _, inputs_h, inputs_w, inputs_ch = inputs.shape.as_list()
    _, x_h, x_w, x_ch = x.shape.as_list()
    stride_h = int(round(inputs_h / x_h))
    stride_w = int(round(inputs_w / x_w))
    equal_ch = inputs_ch == x_ch

    if stride_h>1 or stride_w>1 or not equal_ch:
        shortcut = tcl.conv2d(inputs,
                              num_outputs = x_ch,
                              kernel_size = (1, 1),
                              stride = (stride_h, stride_w),
                              padding = 'VALID')
    else:
        shortcut = inputs

    merged = tf.add(shortcut, x)
    return merged
Project: Tensormodels    Author: asheshjain399    | project source | file source
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                           name='value_l1')  # tf.mul was renamed tf.multiply in TF 1.0
      reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),
                           name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer
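
A hypothetical usage of the returned closure (the variable and weights are illustrative):

import tensorflow as tf

w = tf.Variable(tf.random_normal([10, 10]), name='w')
reg_fn = l1_l2_regularizer(weight_l1=0.01, weight_l2=0.001)
reg_loss = reg_fn(w)  # scalar: 0.01 * sum(|w|) + 0.001 * l2_loss(w)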
Project: text_classification    Author: brightmart    | project source | file source
def inference(self):
        """ building blocks:
        encoder:6 layers.each layers has two   sub-layers. the first is multi-head self-attention mechanism; the second is position-wise fully connected feed-forward network.
               for each sublayer. use LayerNorm(x+Sublayer(x)). all dimension=512.
        decoder:6 layers.each layers has three sub-layers. the second layer is performs multi-head attention over the ouput of the encoder stack.
               for each sublayer. use LayerNorm(x+Sublayer(x)).
        """
        # 1.embedding for encoder input & decoder input
        # 1.1 position embedding for encoder input
        input_x_embeded = tf.nn.embedding_lookup(self.Embedding,self.input_x)  #[None,sequence_length, embed_size]
        input_x_embeded=tf.multiply(input_x_embeded,tf.sqrt(tf.cast(self.d_model,dtype=tf.float32)))
        input_mask=tf.get_variable("input_mask",[self.sequence_length,1],initializer=self.initializer)
        input_x_embeded=tf.add(input_x_embeded,input_mask) #[None,sequence_length,embed_size].position embedding.

        # 2. encoder
        encoder_class=Encoder(self.d_model,self.d_k,self.d_v,self.sequence_length,self.h,self.batch_size,self.num_layer,input_x_embeded,input_x_embeded,dropout_keep_prob=self.dropout_keep_prob,use_residual_conn=self.use_residual_conn)
        Q_encoded,K_encoded = encoder_class.encoder_fn() #K_v_encoder

        Q_encoded=tf.reshape(Q_encoded,shape=(self.batch_size,-1)) #[batch_size,sequence_length*d_model]
        with tf.variable_scope("output"):
            logits = tf.matmul(Q_encoded, self.W_projection) + self.b_projection #logits shape:[batch_size*decoder_sent_length,self.num_classes]
        print("logits:",logits)
        return logits
Project: KATE    Author: hugochan    | project source | file source
def kSparse(self, x, topk):
        print('run regular k-sparse')
        dim = int(x.get_shape()[1])
        if topk > dim:
            warnings.warn('Warning: topk should not be larger than dim: %s, found: %s, using %s' % (dim, topk, dim))
            topk = dim

        k = dim - topk
        values, indices = tf.nn.top_k(-x, k) # indices will be [[0, 1], [2, 1]], values will be [[6., 2.], [5., 4.]]

        # We need to create full indices like [[0, 0], [0, 1], [1, 2], [1, 1]]
        my_range = tf.expand_dims(tf.range(0, tf.shape(indices)[0]), 1)  # will be [[0], [1]]
        my_range_repeated = tf.tile(my_range, [1, k])  # will be [[0, 0], [1, 1]]

        full_indices = tf.stack([my_range_repeated, indices], axis=2) # change shapes to [N, k, 1] and [N, k, 1], to concatenate into [N, k, 2]
        full_indices = tf.reshape(full_indices, [-1, 2])

        to_reset = tf.sparse_to_dense(full_indices, tf.shape(x), tf.reshape(values, [-1]), default_value=0., validate_indices=False)

        res = tf.add(x, to_reset)

        return res
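
A standalone sketch of the same k-sparse idea using a threshold mask instead of the scatter-based reset above (assuming TensorFlow 1.x; values are illustrative):

import tensorflow as tf

x = tf.constant([[1., 6., 2.], [5., 3., 4.]])
topk = 1
values, _ = tf.nn.top_k(x, topk)
kth = tf.reduce_min(values, axis=1, keep_dims=True)  # k-th largest value per row
masked = x * tf.cast(x >= kth, x.dtype)  # zero everything below the k-th largest

with tf.Session() as sess:
    print(sess.run(masked))  # [[0. 6. 0.], [5. 0. 0.]]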
Project: SRGAN-tensorflow    Author: zoharli    | project source | file source
def __init__(self,T,train_mode=1,name='srResNet'):
        with tf.variable_scope(name):
            self.train_mode=train_mode
            conv1=conv_layer(T,[5,5,3,64],1)
            relu1=leaky_relu(conv1)
            block=[]
            for i in range(16):  # xrange is Python 2 only
                block.append(self.residual_block(block[-1] if i else relu1))
            conv2=conv_layer(block[-1],[3,3,64,64],1)
            bn1=batch_norm(conv2) if self.train_mode else conv2
            sum1=tf.add(bn1,relu1)
            conv3=conv_layer(sum1,[3,3,64,256],1)
            ps1=tf.depth_to_space(conv3,2) #pixel-shuffle
            relu2=leaky_relu(ps1)
            conv4=conv_layer(relu2,[3,3,64,256],1)
            ps2=tf.depth_to_space(conv4,2)
            relu3=leaky_relu(ps2)
            self.conv5=conv_layer(relu3,[3,3,64,3],1)
Project: identifiera-sarkasm    Author: risnejunior    | project source | file source
def feed_network(self,data,keep_prob,chunk_size,n_chunks, dynamic):
        # This code is copied from tflearn
        sequence_lengths = None
        if dynamic:
            sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
        batch_size = tf.shape(data)[0]
        weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
        rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._lstm_cell,output_keep_prob=keep_prob)

        # Calculation Begin
        input_shape = data.get_shape().as_list()
        ndim = len(input_shape)
        axis = [1, 0] + list(range(2,ndim))
        data = tf.transpose(data, axis)
        sequence = tf.unstack(data)
        outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length = sequence_lengths)

        if dynamic:
            outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
            output = net.advanced_indexing_op(outputs, sequence_lengths)
        else:
            output = outputs[-1]

        output = tf.add(tf.matmul(output,weight_dropout), self._layer_biases)
        return output
Project: 3D_CNN_jonas    Author: 2015ZxEE    | project source | file source
def variable_with_weight_decay(name, shape, stddev, wd):
    """
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name   -> name of the variable
        shape  -> list of ints
        stddev -> standard deviation of a truncated Gaussian
        wd     -> add L2Loss weight decay multiplied by this float.
                        If None, weight decay is not added for this Variable.
    Rtns:
        var    -> variable tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var   = variable_on_cpu(name,shape,
                    tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')  # tf.mul was renamed tf.multiply in TF 1.0
        tf.add_to_collection('losses', weight_decay)
    return var
Project: DeepLearning    Author: STHSF    | project source | file source
def add_layers(inputs, in_size, out_size, layer_name, keep_prob, activation_function=None):

    # add one more layer and return the output of this layer
    weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    wx_plus_b = tf.matmul(inputs, weights) + biases

    # apply dropout here: drop a fraction of the wx_plus_b activations;
    # keep_prob is the fraction kept (not dropped) and is fed in at sess.run time
    wx_plus_b = tf.nn.dropout(wx_plus_b, keep_prob)

    if activation_function is None:
        outputs = wx_plus_b
    else:
        outputs = activation_function(wx_plus_b)

    tf.summary.histogram(layer_name + '/outputs', outputs)  # tf.histogram_summary pre-TF 1.0

    return outputs
Project: DeepLearning    Author: STHSF    | project source | file source
def add_layer(inputs, in_size, out_size, activation_function=None):

    # add one more layer and return the output of this layer
    # group everything belonging to this layer under one name scope (for TensorBoard)
    with tf.name_scope('layer'):
        # the layer's weights
        with tf.name_scope('weights_1'):
            weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
        with tf.name_scope('biases_1'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.add(tf.matmul(inputs, weights), biases)

            # apply dropout to wx_plus_b; keep_prob is the fraction kept and is fed in at sess.run time
            wx_plus_b = tf.nn.dropout(wx_plus_b, keep_prob=1)

        if activation_function is None:
            outputs = wx_plus_b
        else:
            outputs = activation_function(wx_plus_b, )

        return outputs

# define placeholders for the network inputs x and y
Project: tensorflow-basic    Author: weaponsjtu    | project source | file source
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Create a summary to visualize the first layer ReLU activation
    tf.summary.histogram("relu1", layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Create another summary to visualize the second layer ReLU activation
    tf.summary.histogram("relu2", layer_2)
    # Output layer
    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
    return out_layer

# Store layers weight & bias
Project: website-fingerprinting    Author: AxelGoetz    | project source | file source
def _get_layer(self, layer_input, size_last_layer, size_current_layer):
        """
        Returns a layer with a batch normalized input, depending on the `batch_norm flag`

        @param layer_input is the value used as an input to the layer.
        @param size_last_layer is the size of the last layer (used in weight) or the size of the input
        @param size_current_layer is the size of the current layer (used in weight and bias)
        """
        weight = tf.Variable(tf.random_normal([size_last_layer, size_current_layer]))
        bias = tf.Variable(tf.random_normal([size_current_layer]))

        if not self.batch_norm:
            return self.activation_func(tf.add(tf.matmul(layer_input, weight), bias))


        layer_input = tf.contrib.layers.batch_norm(layer_input,
                                                   center=True, scale=True,
                                                   is_training=self.is_training,
                                                   scope='bn{}-{}'.format(size_last_layer, size_current_layer))

        return self.activation_func(tf.add(tf.matmul(layer_input, weight), bias))
Project: fold    Author: tensorflow    | project source | file source
def test_forward_declarations(self):
    # Define a simple expression data structure
    nlit = lambda x: {'op': 'lit', 'val': x}
    nadd = lambda x, y: {'op': 'add', 'left': x, 'right': y}
    nexpr = nadd(nadd(nlit(3.0), nlit(5.0)), nlit(2.0))

    # Define a recursive block using forward declarations
    expr_fwd = tdb.ForwardDeclaration(tdt.PyObjectType(),
                                      tdt.TensorType((), 'float32'))
    lit_case = tdb.GetItem('val') >> tdb.Scalar()
    add_case = (tdb.Record({'left': expr_fwd(), 'right': expr_fwd()})
                >> tdb.Function(tf.add))
    expr = tdb.OneOf(lambda x: x['op'], {'lit': lit_case, 'add': add_case})
    expr_fwd.resolve_to(expr)

    self.assertBuilds(10.0, expr, nexpr, max_depth=2)
Project: fold    Author: tensorflow    | project source | file source
def test_constant_network_with_tags(self):
    shape1 = loom.TypeShape('int64', (3,), 'alpha')
    shape2 = loom.TypeShape('int64', (3,), 'beta')
    value1 = np.array([1, 2, 3], dtype='int64')
    value2 = np.array([4, 5, 6], dtype='int64')
    ops = {'add1': BinaryLoomOp(shape1, tf.add),
           'add2': BinaryLoomOp(shape2, tf.add)}
    the_loom = loom.Loom(named_ops=ops)
    output_tensor1 = the_loom.output_tensor(shape1)
    output_tensor2 = the_loom.output_tensor(shape2)
    with self.test_session():
      weaver = the_loom.make_weaver()
      c1 = weaver(value1, tag='alpha')
      c2 = weaver(value2, tag='beta')
      result1 = output_tensor1.eval(
          feed_dict=weaver.build_feed_dict([c2, c1]))
      result2 = output_tensor2.eval(
          feed_dict=weaver.build_feed_dict([c2, c1]))
    self.assertTrue((result1[0] == value1).all())
    self.assertTrue((result2[0] == value2).all())
Project: fold    Author: tensorflow    | project source | file source
def test_constant_network_with_tags_dry_run(self):
    shape1 = loom.TypeShape('int64', (3,), 'alpha')
    shape2 = loom.TypeShape('int64', (3,), 'beta')
    value1 = np.array([1, 2, 3], dtype='int64')
    value2 = np.array([4, 5, 6], dtype='int64')
    ops = {'add1': BinaryLoomOp(shape1, tf.add),
           'add2': BinaryLoomOp(shape2, tf.add)}
    the_loom = loom.Loom(named_ops=ops, dry_run=True)
    output_tensor1 = the_loom.output_tensor(shape1)
    output_tensor2 = the_loom.output_tensor(shape2)
    with self.test_session():
      weaver = the_loom.make_weaver()
      c1 = weaver(value1, tag='alpha')
      c2 = weaver(value2, tag='beta')
      result1 = output_tensor1.eval(
          feed_dict=weaver.build_feed_dict([c2, c1]))
      result2 = output_tensor2.eval(
          feed_dict=weaver.build_feed_dict([c2, c1]))
    zero_vec = np.zeros_like(value1)
    self.assertTrue((result1[0] == zero_vec).all())
    self.assertTrue((result2[0] == zero_vec).all())
Project: fold    Author: tensorflow    | project source | file source
def test_two_layer_sum_network(self):
    shape = loom.TypeShape('int64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add)}
    the_loom = loom.Loom(named_ops=ops)
    output_tensor = the_loom.output_tensor(shape)
    with self.test_session():
      weaver = the_loom.make_weaver()
      c1 = weaver(np.array([1, 2, 3], dtype='int64'))
      c2 = weaver(np.array([2, 4, 6], dtype='int64'))
      c3 = weaver(np.array([3, 6, 9], dtype='int64'))
      c4 = weaver(np.array([4, 8, 12], dtype='int64'))
      sum_1_2 = weaver.add(c1, c2)
      sum_3_4 = weaver.add(c3, c4)
      sum_1_2_3_4 = weaver.add(sum_1_2, sum_3_4)
      result = output_tensor.eval(
          feed_dict=weaver.build_feed_dict([sum_1_2_3_4]))
    self.assertTrue((result == np.array([[10, 20, 30]], dtype='int64')).all())
Project: fold    Author: tensorflow    | project source | file source
def test_three_layer_sum_network(self):
    shape = loom.TypeShape('int64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add)}
    the_loom = loom.Loom(named_ops=ops)
    output_tensor = the_loom.output_tensor(shape)

    with self.test_session():
      weaver = the_loom.make_weaver()
      vals = [weaver(np.array([0, 1, 1 << k], dtype='int64'))
              for k in range(8)]
      for _ in range(3):  # xrange is Python 2 only
        vals = [weaver.add(*args) for args in group_values(vals, 2)]
      big_sum = vals[0]
      result = output_tensor.eval(
          feed_dict=weaver.build_feed_dict([big_sum]))
    self.assertTrue((result == np.array([[0, 8, 255]], dtype='int64')).all())
Project: fold    Author: tensorflow    | project source | file source
def test_two_ops_network(self):
    shape = loom.TypeShape('int64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    the_loom = loom.Loom(named_ops=ops)
    output_tensor = the_loom.output_tensor(shape)
    with self.test_session():
      weaver = the_loom.make_weaver()
      c1 = weaver(np.array([1, 2, 3], dtype='int64'))
      c2 = weaver(np.array([2, 4, 6], dtype='int64'))
      c3 = weaver(np.array([3, 6, 9], dtype='int64'))
      sum_2_3 = weaver.add(c2, c3)
      sum_12_13 = weaver.mul(c1, sum_2_3)
      result = output_tensor.eval(
          feed_dict=weaver.build_feed_dict([sum_12_13]))
    self.assertTrue((result == np.array([[5, 20, 45]], dtype='int64')).all())
Project: fold    Author: tensorflow    | project source | file source
def test_two_ops_network_tagged_named_tensorx(self):
    shape = loom.TypeShape('int64', (3,), tag='x')
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    named_tensors = {
        'c1': (tf.constant(np.array([1, 2, 3], dtype='int64')), 'x'),
        'c2': (tf.constant(np.array([2, 4, 6], dtype='int64')), 'x'),
        'c3': (tf.constant(np.array([3, 6, 9], dtype='int64')), 'x')
    }
    the_loom = loom.Loom(named_ops=ops, named_tensors=named_tensors)
    output_tensor = the_loom.output_tensor(shape)
    with self.test_session():
      weaver = the_loom.make_weaver()
      sum_2_3 = weaver.add(weaver.c2, weaver.c3)
      sum_12_13 = weaver.mul(weaver.c1, sum_2_3)
      result = output_tensor.eval(
          feed_dict=weaver.build_feed_dict([sum_12_13]))
    self.assertTrue((result == np.array([[5, 20, 45]], dtype='int64')).all())
Project: fold    Author: tensorflow    | project source | file source
def test_gradient(self):
    x_var = tf.Variable(tf.zeros([3], dtype='float64'), name='x')
    shape = loom.TypeShape('float64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    the_loom = loom.Loom(named_tensors={'x': x_var}, named_ops=ops)

    output_tensor = the_loom.output_tensor(shape)
    output = tf.reduce_sum(output_tensor)
    gradient = tf.gradients(output, [x_var])[0]
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())

      weaver = the_loom.make_weaver()
      m = weaver(np.array([1, 2, 3], dtype='float64'))
      b = weaver(np.array([47, 9, -1], dtype='float64'))
      mx = weaver.mul(m, weaver.x)
      mx_plus_b = weaver.add(mx, b)
      result = gradient.eval(feed_dict=weaver.build_feed_dict([mx_plus_b]))
    self.assertTrue((result == np.array(
        [1.0, 2.0, 3.0], dtype='float64')).all())
Project: fold    Author: tensorflow    | project source | file source
def test_gradient_with_direct_feed_dict(self):
    x_var = tf.Variable(tf.zeros([3], dtype='float64'), name='x')
    shape = loom.TypeShape('float64', (3,))
    ops = {'add': BinaryLoomOp(shape, tf.add),
           'mul': BinaryLoomOp(shape, tf.multiply)}
    the_loom = loom.Loom(named_tensors={'x': x_var}, named_ops=ops,
                         direct_feed_dict=True)

    output_tensor = the_loom.output_tensor(shape)
    output = tf.reduce_sum(output_tensor)
    gradient = tf.gradients(output, [x_var])[0]
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())

      weaver = the_loom.make_weaver()
      m = weaver(np.array([1, 2, 3], dtype='float64'))
      b = weaver(np.array([47, 9, -1], dtype='float64'))
      mx = weaver.mul(m, weaver.x)
      mx_plus_b = weaver.add(mx, b)
      result = gradient.eval(feed_dict=weaver.build_feed_dict([mx_plus_b]))
    self.assertTrue((result == np.array(
        [1.0, 2.0, 3.0], dtype='float64')).all())
Project: tensorflow-srgan    Author: olgaliak    | project source | file source
def create_generator_loss(disc_output, gene_output, features):
    # I.e. did we fool the discriminator?
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_output, labels=tf.ones_like(disc_output))
    gene_ce_loss  = tf.reduce_mean(cross_entropy, name='gene_ce_loss')

    # I.e. does the result look like the feature?
    K = int(gene_output.get_shape()[1])//int(features.get_shape()[1])
    assert K == 2 or K == 4 or K == 8    
    downscaled = _downscale(gene_output, K)

    gene_l1_loss  = tf.reduce_mean(tf.abs(downscaled - features), name='gene_l1_loss')

    gene_loss     = tf.add((1.0 - FLAGS.gene_l1_factor) * gene_ce_loss,
                           FLAGS.gene_l1_factor * gene_l1_loss, name='gene_loss')

    return gene_loss
Project: cloudml-samples    Author: GoogleCloudPlatform    | project source | file source
def make_feature_columns():
  """Retrieve the feature columns required for training."""
  feature_columns = (make_query_feature_columns()
                     | make_candidate_feature_columns())
  # Add feature column for the label.
  target_rating_real_column = tf.contrib.layers.real_valued_column(
      column_name=LABEL_RATING_SCORE, dtype=tf.float32)
  feature_columns.add(target_rating_real_column)

  # Ranking candidate movies used only in eval graph to rank candidate movie
  # against.
  ranking_candidate_movie_ids = (
      tf.contrib.layers.sparse_column_with_integerized_feature(
          column_name=RANKING_CANDIDATE_MOVIE_IDS,
          bucket_size=MOVIE_VOCAB_SIZE))
  feature_columns.add(ranking_candidate_movie_ids)

  return feature_columns
Project: deep-time-reading    Author: felixduvallet    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')  # tf.mul was renamed tf.multiply in TF 1.0
        tf.add_to_collection('losses', weight_decay)
    return var
Project: deep-time-reading    Author: felixduvallet    | project source | file source
def inference(images, num_classes):
    """ Build a time reading model for *either* hours or minutes.

    Args:
      images: Images returned from distorted_inputs() or inputs().
      num_classes: 12 for hours, 60 for minutes.

    Returns:
      Logits.
    """

    local4 = _inference_shared(images)

    dim = num_classes

    # softmax, i.e. softmax(WX + b)
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, dim],
                                              stddev=1 / 192.0, wd=0.0)
        biases = _variable_on_cpu('biases', [dim],
                                  tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases,
                                name=scope.name)
        _activation_summary(softmax_linear)
    return softmax_linear
Project: qrn    Author: uwnlp    | project source | file source
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')  # tf.mul was renamed tf.multiply in TF 1.0
        tf.add_to_collection('losses', weight_decay)
    return var
Project: third_person_im    Author: bstadie    | project source | file source
def __init__(self, incoming, pattern, **kwargs):
        super(DimshuffleLayer, self).__init__(incoming, **kwargs)

        # Sanity check the pattern
        used_dims = set()
        for p in pattern:
            if isinstance(p, int):
                # Dimension p
                if p in used_dims:
                    raise ValueError("pattern contains dimension {0} more "
                                     "than once".format(p))
                used_dims.add(p)
            elif p == 'x':
                # Broadcast
                pass
            else:
                raise ValueError("pattern should only contain dimension"
                                 "indices or 'x', not {0}".format(p))

        self.pattern = pattern

        # try computing the output shape once as a sanity check
        self.get_output_shape_for(self.input_shape)
Project: third_person_im    Author: bstadie    | project source | file source
def unique(l):
    """Filters duplicates of iterable.
    Create a new list from l with duplicate entries removed,
    while preserving the original order.
    Parameters
    ----------
    l : iterable
        Input iterable to filter of duplicates.
    Returns
    -------
    list
        A list of elements of `l` without duplicates and in the same order.
    """
    new_list = []
    seen = set()
    for el in l:
        if el not in seen:
            new_list.append(el)
            seen.add(el)

    return new_list
Project: zhusuan    Author: thu-ml    | project source | file source
def test_tensor_conversion(self):
        with BayesianNet(observed={'a': 1., 'c': 1.}):
            a = StochasticTensor('a', Mock(dtype=tf.float32), 1)
            b = tf.add(1., a)
            c = StochasticTensor('c', Mock(dtype=tf.float32), 1)
            # tensorflow will try to convert c to the same type with 1 (int32)
            # calling the registered tensor conversion function of c.
            # If failed, it will try not to request the type. So an error
            # will be raised by the operator.
            with self.assertRaisesRegexp(
                    TypeError, "type float32.*not match.*type int32"):
                _ = tf.add(1, c)
        with self.test_session(use_gpu=True):
            self.assertNear(b.eval(), 2., 1e-6)
        with self.assertRaisesRegexp(ValueError, "Ref type not supported"):
            _ = StochasticTensor._to_tensor(a, as_ref=True)
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | project source | file source
def _map(self, example_serialized, features=None):
        """
        Maps a example_serialized read from the dataset into the final set of tf.Tensors
        to return to the model.

        Simple example:

        def _parse(line, features=None):
            a, b = [np.int32(x) for x in line.split()]
            return a, b

        t_input, t_ouptut = tf.py_func(_parse, [line], [tf.int32, tf.int32],
                                       stateful=True, name='py_parse_example')
        t_ouptut = tf.add(t_ouptut, 1)

        return t_input, t_ouptut

        :param example_serialized: the example serialized
        :param features: do not use this as it is deprecated after 1.2
        :return: a tuple of the tensors to return when get_next is called. Usually (inputs,outputs)
        """
        pass
Project: EveryBodyTensorFlow    Author: jwkanggist    | project source | file source
def neural_net(x):
    # Hidden fully connected layer with 7 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)

    # Hidden fully connected layer with 7 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)

    # Hidden fully connected layer with 4 neurons
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)

    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_3, weights['out']) + biases['out']
    return out_layer

# Construct model
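
Following the trailing comment, a hypothetical construction step (n_input and the weights/biases dictionaries with keys 'h1'..'h3' and 'out' are assumed to be defined as in the snippet):

import tensorflow as tf

n_input = 4  # illustrative input size
X = tf.placeholder(tf.float32, [None, n_input])
logits = neural_net(X)
pred_classes = tf.argmax(logits, axis=1)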
Project: Multi-channel-speech-extraction-using-DNN    Author: zhr1201    | project source | file source
def _double_conv_layer_wrapper(self, input1, input2, out_feature_maps,
                                   filter_length, is_train):
        '''Two parallele convolution layers for each channel
        using shared weights'''
        filter_width = input1.get_shape()[1].value
        in_feature_maps = input1.get_shape()[-1].value
        # shared weights
        W_conv = weight_variable(
            [filter_width, filter_length, in_feature_maps, out_feature_maps],
            regularizer=tf.contrib.layers.l2_regularizer(self.reg_fac))
        # shared bias
        b_conv = bias_variable([out_feature_maps])
        h_conv_t1 = tf.add(conv2d(input1, W_conv), b_conv)
        h_conv_b1 = self._batch_norm_wrapper(h_conv_t1, is_train)
        h_conv_t2 = tf.add(conv2d(input2, W_conv), b_conv)
        h_conv_b2 = self._batch_norm_wrapper(h_conv_t2, is_train)
        return tf.nn.relu(h_conv_b1), tf.nn.relu(h_conv_b2)
Project: hyperchamber    Author: 255BITS    | project source | file source
def _create_network(self):
        # Initialize autoencode network weights and biases
        network_weights = self._initialize_weights(**self.network_architecture)

        # Use recognition network to determine mean and 
        # (log) variance of Gaussian distribution in latent
        # space
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"], 
                                      network_weights["biases_recog"])

        # Draw one sample z from Gaussian distribution
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1, 
                               dtype=tf.float32)
        # z = mu + sigma*epsilon
        self.z = tf.add(self.z_mean,
                        tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))  # tf.mul was renamed tf.multiply in TF 1.0

        # Use generator to determine mean of
        # Bernoulli distribution of reconstructed input
        self.x_reconstr_mean = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.multiply(c, a)  # tf.mul and tf.sub were renamed in TF 1.0
        e = tf.div(d, b)
        f = tf.subtract(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val})
Project: DeepFM    Author: dwt0317    | project source | file source
def build_model(user_indices, item_indices, rank, ratings, user_cnt, item_cnt, lr, lamb, mu, init_value):


    W_user = tf.Variable(tf.truncated_normal([user_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'user_embedding', dtype=tf.float32)
    W_item = tf.Variable(tf.truncated_normal([item_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'item_embedding', dtype=tf.float32)

    W_user_bias = tf.concat([W_user, tf.ones((user_cnt,1), dtype=tf.float32)], 1, name='user_embedding_bias')
    W_item_bias = tf.concat([tf.ones((item_cnt,1), dtype=tf.float32), W_item], 1, name='item_embedding_bias')

    user_feature = tf.nn.embedding_lookup(W_user_bias, user_indices, name = 'user_feature')
    item_feature = tf.nn.embedding_lookup(W_item_bias, item_indices, name = 'item_feature') 


    preds = tf.add(tf.reduce_sum( tf.multiply(user_feature , item_feature) , 1), mu)

    square_error = tf.sqrt(tf.reduce_mean( tf.squared_difference(preds, ratings)))
    loss = square_error + lamb*(tf.reduce_mean(tf.nn.l2_loss(W_user)) + tf.reduce_mean(tf.nn.l2_loss(W_item)))

    tf.summary.scalar('square_error', square_error)
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    #tf.global_variables_initializer()
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)   # tf.train.AdadeltaOptimizer(learning_rate=lr).minimize(loss)    #

    return train_step, square_error, loss, merged_summary
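
A hypothetical call of build_model (all sizes and hyperparameters are illustrative):

import tensorflow as tf

user_idx = tf.placeholder(tf.int32, [None])
item_idx = tf.placeholder(tf.int32, [None])
ratings = tf.placeholder(tf.float32, [None])
train_step, rmse, loss, summaries = build_model(
    user_idx, item_idx, rank=16, ratings=ratings, user_cnt=1000,
    item_cnt=500, lr=0.01, lamb=0.1, mu=3.5, init_value=0.1)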
Project: facial-emotion-detection-dl    Author: dllatas    | project source | file source
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')  # tf.mul was renamed tf.multiply in TF 1.0
    tf.add_to_collection('losses', weight_decay)
  return var
Project: TensorFlow-World    Author: astorfi    | project source | file source
def kernel_pred(x_data, prediction_grid):
    A = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    B = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
    square_distance = tf.add(tf.subtract(A, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))),
                             tf.transpose(B))
    return tf.exp(tf.multiply(gamma, tf.abs(square_distance)))
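
Note that the expression computes exp(gamma * |d|), so Gaussian (RBF) kernel behaviour requires a negative gamma. A hypothetical wiring (values are illustrative):

import tensorflow as tf

gamma = tf.constant(-25.0)  # must be negative for an RBF kernel
x_data = tf.placeholder(tf.float32, [None, 2])
prediction_grid = tf.placeholder(tf.float32, [None, 2])
pred_kernel = kernel_pred(x_data, prediction_grid)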
Project: TensorFlow-World    Author: astorfi    | project source | file source
def loss_fn(W,b,x_data,y_target):
    logits = tf.subtract(tf.matmul(x_data, W),b)
    norm_term = tf.divide(tf.reduce_sum(tf.multiply(tf.transpose(W),W)),2)
    classification_loss = tf.reduce_mean(tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
    total_loss = tf.add(tf.multiply(FLAGS.C_param,classification_loss), tf.multiply(FLAGS.Reg_param,norm_term))
    return total_loss
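
A hypothetical wiring of this soft-margin SVM loss (FLAGS.delta, FLAGS.C_param and FLAGS.Reg_param are assumed to be defined elsewhere in the project; shapes are illustrative):

import tensorflow as tf

W = tf.Variable(tf.random_normal([2, 1]))
b = tf.Variable(tf.random_normal([1, 1]))
x_data = tf.placeholder(tf.float32, [None, 2])
y_target = tf.placeholder(tf.float32, [None, 1])
total_loss = loss_fn(W, b, x_data, y_target)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)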
Project: human-rl    Author: gsastry    | project source | file source
def write_summaries(self, X, y, label, step, summary_writer=None):
        if not X:
            return
        y_pred, loss = self.predict_proba_with_loss(X, y)
        metrics = classification_metrics(y, y_pred, self.threshold)
        metrics['loss'] = loss
        if summary_writer is not None:
            summary = tf.Summary()
            for key, value in metrics.items():
                summary.value.add(tag="metrics/{}".format(key), simple_value=float(value))
            if not self.summary_tensors:
                self.summary_tensors["positive_predictions_input"] = tf.placeholder(
                    tf.float32, [None], "positive_predictions_input")
                self.summary_tensors["positive_predictions"] = tf.summary.histogram(
                    "positive_predictions", self.summary_tensors["positive_predictions_input"])
                self.summary_tensors["negative_predictions_input"] = tf.placeholder(
                    tf.float32, [None], "negative_predictions_input")
                self.summary_tensors["negative_predictions"] = tf.summary.histogram(
                    "negative_predictions", self.summary_tensors["negative_predictions_input"])
            summary_writer.add_summary(
                self.summary_tensors["positive_predictions"].eval(
                    feed_dict={self.summary_tensors["positive_predictions_input"]: y_pred[y]}),
                step)
            summary_writer.add_summary(
                self.summary_tensors["negative_predictions"].eval(
                    feed_dict={self.summary_tensors["negative_predictions_input"]: y_pred[~y]}),
                step)
            summary_writer.add_summary(summary, step)
            summary_writer.flush()