Python tensorflow module: zeros_like() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.zeros_like().

Project: attend_infer_repeat    Author: akosiorek    | Project source | File source
def masked_apply(tensor, op, mask):
    """Applies `op` to tensor only at locations indicated by `mask` and sets the rest to zero.

    Similar to `tensor = tf.where(mask, op(tensor), tf.zeros_like(tensor))`, but it behaves correctly
    when `op(tensor)` produces NaN or inf at masked-out locations, whereas `tf.where` does not.

    :param tensor: tf.Tensor
    :param op: tf.Op
    :param mask: tf.Tensor with dtype == bool
    :return: tf.Tensor
    """
    chosen = tf.boolean_mask(tensor, mask)
    applied = op(chosen)
    idx = tf.to_int32(tf.where(mask))
    result = tf.scatter_nd(idx, applied, tf.shape(tensor))
    return result
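
A minimal usage sketch of masked_apply (hypothetical values, assuming TensorFlow 1.x) illustrating the NaN case the docstring warns about:

import tensorflow as tf

x = tf.constant([[-1.0, 4.0], [0.0, 9.0]])
mask = x > 0
# tf.where(mask, tf.sqrt(x), tf.zeros_like(x)) would still evaluate tf.sqrt at
# the negative entry, so its gradient there is NaN and poisons backprop;
# masked_apply evaluates tf.sqrt only where mask is True.
y = masked_apply(x, tf.sqrt, mask)  # [[0., 2.], [0., 3.]]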
Project: deep-summarization    Author: harpribot    | Project source | File source
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend a "GO" token and drop the final
            # token of the label sequence
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
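
The zeros_like "GO" token above (and in the variants that follow) is simply an all-zero int32 tensor whose batch shape matches the first label placeholder; a standalone sketch with hypothetical names:

import tensorflow as tf

labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t) for t in range(3)]
go = tf.zeros_like(labels[0], dtype=tf.int32, name="GO")
dec_inp = [go] + labels[:-1]  # decoder inputs are the targets shifted right by one step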
Project: deep-summarization    Author: harpribot    | Project source | File source
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,),
                                           name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,),
                                          name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend a "GO" token and drop the final
            # token of the label sequence
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                            + self.labels[:-1])
Project: deep-summarization    Author: harpribot    | Project source | File source
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend a "GO" token and drop the final
            # token of the label sequence
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: deep-summarization    Author: harpribot    | Project source | File source
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend a "GO" token and drop the final
            # token of the label sequence
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: youtube-8m    Author: wangheda    | Project source | File source
def resize_axis(tensor, axis, new_size, fill_value=0):
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
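
A quick behavioral sketch of resize_axis (hypothetical values, assuming TensorFlow 1.x):

import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
shorter = resize_axis(t, axis=1, new_size=2)                # [[1, 2], [4, 5]]
longer = resize_axis(t, axis=1, new_size=5, fill_value=-1)  # pads the end with -1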
Project: unsupervised-2017-cvprw    Author: imatge-upc    | Project source | File source
def generate_mask(img_mask_list, h, w, l):
    img_masks, loss_masks = [], []

    for i in range(l):
        # generate image mask
        img_mask = img_mask_list[i]
        img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
        img_mask = tf.reshape(img_mask, (h, w))
        img_masks.append(img_mask)

        # generate loss mask
        s_total   = h * w
        s_mask    = tf.reduce_sum(img_mask)
        def f1(): return img_mask*((s_total-s_mask)/s_mask-1)+1
        def f2(): return tf.zeros_like(img_mask)
        def f3(): return tf.ones_like(img_mask)
        loss_mask = tf.case([(tf.equal(s_mask, 0), f2), \
                             (tf.less(s_mask, s_total/2), f1)],
                             default=f3)

        loss_masks.append(loss_mask)

    return tf.stack(img_masks), tf.stack(loss_masks)
Project: tensorflow_ocr    Author: BowieHsu    | Project source | File source
def OHNM_single_image(scores, n_pos, neg_mask):
    """Online Hard Negative Mining.
    scores: the scores of being predicted as negative cls
    n_pos: the number of positive samples 
    neg_mask: mask of negative samples
    Return:
    the mask of selected negative samples.
    if n_pos == 0, no negative samples will be selected.
    """
    def has_pos():
        n_neg = n_pos * 3
        max_neg_entries = tf.reduce_sum(tf.cast(neg_mask, tf.int32))
        n_neg = tf.minimum(n_neg, max_neg_entries)
        n_neg = tf.cast(n_neg, tf.int32)
        neg_conf = tf.boolean_mask(scores, neg_mask)
        vals, _ = tf.nn.top_k(-neg_conf, k=n_neg)
        threshold = vals[-1]  # a negative value
        selected_neg_mask = tf.logical_and(neg_mask, scores <= -threshold)
        return tf.cast(selected_neg_mask, tf.float32)

    def no_pos():
        return tf.zeros_like(neg_mask, tf.float32)

    return tf.cond(n_pos > 0, has_pos, no_pos)
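
A hedged calling sketch (hypothetical inputs; n_pos is passed as a tensor so that tf.cond can branch on it):

import tensorflow as tf

scores = tf.constant([0.9, 0.2, 0.8, 0.1])       # negative-class scores
neg_mask = tf.constant([True, True, True, True])
n_pos = tf.constant(1)
selected = OHNM_single_image(scores, n_pos, neg_mask)  # selects at most 3 * n_pos negatives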
Project: WassersteinGAN.tensorflow    Author: shekkizh    | Project source | File source
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
        discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                           name="disc_real_loss")

        discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                           name="disc_fake_loss")
        self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

        gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
        if use_features:
            gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
        else:
            gen_loss_features = 0
        self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

        tf.scalar_summary("Discriminator_loss", self.discriminator_loss)
        tf.scalar_summary("Generator_loss", self.gen_loss)
Project: HyperGAN    Author: 255BITS    | Project source | File source
def _create(self, d_real, d_fake):
        ops = self.ops
        config = self.config
        gan = self.gan

        generator_target_probability = config.generator_target_probability or 0.8
        label_smooth = config.label_smooth or 0.2

        zeros = tf.zeros_like(d_fake)
        ones = tf.ones_like(d_fake)
        if config.improved:
            g_loss = self.sigmoid_kl_with_logits(d_fake, generator_target_probability)
            d_loss = self.sigmoid_kl_with_logits(d_real, 1.-label_smooth) + \
                    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros)
        else:
            g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros)
            d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=zeros) + \
                     tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=ones)

        return [d_loss, g_loss]
Project: GAN-Sentence    Author: huseinzol05    | Project source | File source
def __init__(self, num_layers, size_layer, dimension_input, len_noise, sequence_size, learning_rate):
        self.noise = tf.placeholder(tf.float32, [None, None, len_noise])
        self.fake_input = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.true_sentence = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.initial_layer = generator_encode(self.noise, num_layers, size_layer, len_noise)
        self.final_outputs = generator_sentence(self.fake_input, self.initial_layer, num_layers, size_layer, dimension_input)
        fake_logits = discriminator(self.final_outputs, num_layers, size_layer, dimension_input)
        true_logits = discriminator(self.true_sentence, num_layers, size_layer, dimension_input, reuse = True)
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = true_logits, labels = tf.ones_like(true_logits)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.zeros_like(fake_logits)))
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.ones_like(fake_logits)))

        self.d_loss = d_loss_real + d_loss_fake
        d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'discriminator')
        g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_encode') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_sentence')
        self.d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1 = 0.5).minimize(self.d_loss, var_list = d_vars)
        self.g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1 = 0.5).minimize(self.g_loss, var_list = g_vars)
Project: zhusuan    Author: thu-ml    | Project source | File source
def get_acceptance_rate(q, p, new_q, new_p, log_posterior, mass, data_axes):
    old_hamiltonian, old_log_prob = hamiltonian(
        q, p, log_posterior, mass, data_axes)
    new_hamiltonian, new_log_prob = hamiltonian(
        new_q, new_p, log_posterior, mass, data_axes)
    old_log_prob = tf.check_numerics(
        old_log_prob,
        'HMC: old_log_prob has numeric errors! Try better initialization.')
    acceptance_rate = tf.exp(
        tf.minimum(-new_hamiltonian + old_hamiltonian, 0.0))
    is_finite = tf.logical_and(tf.is_finite(acceptance_rate),
                               tf.is_finite(new_log_prob))
    acceptance_rate = tf.where(is_finite, acceptance_rate,
                               tf.zeros_like(acceptance_rate))
    return old_hamiltonian, new_hamiltonian, old_log_prob, new_log_prob, \
        acceptance_rate
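
The tf.where(is_finite, ..., tf.zeros_like(...)) line above is a common idiom for zeroing out non-finite entries; a minimal standalone sketch:

import tensorflow as tf

v = tf.constant([1.0, float('inf'), float('nan')])
clean = tf.where(tf.is_finite(v), v, tf.zeros_like(v))  # [1., 0., 0.]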
Project: tensorflow-adversarial    Author: gongzhitaao    | Project source | File source
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        dx = - y * g / tf.norm(g)
        return i+1, z+dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
Project: crfasrnn_keras    Author: sadeepj    | Project source | File source
def _high_dim_filter_grad(op, grad):
    """ Gradients for the HighDimFilter op. We only need to calculate the gradients
    w.r.t. the first input (unaries) as we never need to backprop errors to the
    second input (RGB values of the image).

    Args:
    op: The `high_dim_filter` operation that we are differentiating.
    grad: Gradients with respect to the output of the `high_dim_filter` op.

    Returns:
    Gradients with respect to the input of `high_dim_filter`.
    """

    rgb = op.inputs[1]
    grad_vals = custom_module.high_dim_filter(grad, rgb,
                                              bilateral=op.get_attr('bilateral'),
                                              theta_alpha=op.get_attr('theta_alpha'),
                                              theta_beta=op.get_attr('theta_beta'),
                                              theta_gamma=op.get_attr('theta_gamma'),
                                              backwards=True)

    return [grad_vals, tf.zeros_like(rgb)]
Project: tensorflow-forward-ad    Author: renmengye    | Project source | File source
def _max_pool_grad_grad(dy, x, y, ksize, strides, padding, argmax=None):
  """Gradients of MaxPoolGrad."""
  if argmax is None:
    _, argmax = tf.nn.max_pool_with_argmax(x, ksize, strides, padding)
  grad = dy
  grad_flat = tf.reshape(grad, [-1])
  argmax_flat = tf.reshape(argmax, [-1])

  x_shape = tf.cast(tf.shape(x), argmax.dtype)
  batch_dim = tf.reshape(
      tf.range(
          x_shape[0], dtype=argmax.dtype), [-1, 1, 1, 1])
  nelem = tf.reduce_prod(x_shape[1:])
  batch_dim *= nelem

  y_zero = tf.zeros_like(y, dtype=argmax.dtype)
  batch_dim += y_zero
  batch_dim = tf.reshape(batch_dim, [-1])

  argmax_flat += batch_dim
  grad_input = tf.gather(grad_flat, argmax_flat)
  grad_input = tf.reshape(grad_input, tf.shape(y))
  return grad_input
Project: tensorflow-forward-ad    Author: renmengye    | Project source | File source
def Pack_FwGrad(*args, **kwargs):
  dx = args[1:]
  axis = kwargs["axis"]
  if all(map(lambda x: x is None, dx)):
    log.error("hey")
    return None
  else:
    ### Here we need to fill in zeros.
    def _mapper(_):
      dx = _[0]
      x = _[1]
      return dx if dx is not None else tf.zeros_like(x)

    dx = list(map(_mapper, zip(dx, list(args[0].inputs))))
    if tf.__version__.startswith("0"):
      return tf.pack(dx, axis=axis)
    else:
      return tf.stack(dx, axis=axis)
Project: RFHO    Author: lucfra    | Project source | File source
def uppool(value, name='uppool'):  # TODO TBD??
    """N-dimensional version of the unpooling operation from
    https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf
    Note that the only dimension that can be unspecified is the first one (b)

    :param name:
    :param value: A Tensor of shape [b, d0, d1, ..., dn, ch]
    :return: A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]

    """
    with tf.name_scope(name) as scope:
        sh = value.get_shape().as_list()
        dim = len(sh[1:-1])
        print(value)
        out = (tf.reshape(value, [-1] + sh[-dim:]))
        for i in range(dim, 0, -1):
            # out = tf.concat(i, [out, tf.zeros_like(out)])  #original implementation added zeros
            out = tf.concat([out, tf.identity(out)], i)  # copies values
        out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]
        out = tf.reshape(out, out_size, name=scope)
    return out
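
A shape-level sketch of uppool (hypothetical input): every spatial dimension doubles while batch and channel dimensions are preserved:

import tensorflow as tf

x = tf.zeros([8, 16, 16, 3])  # [b, d0, d1, ch]
y = uppool(x)                 # shape: [8, 32, 32, 3]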
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def zeros_like(x, dtype=None, name=None):
    """Instantiates an all-zeros Keras variable
    of the same shape as another Keras variable or tensor and returns it.

    # Arguments
        x: Keras variable or Keras tensor.
        dtype: String, dtype of returned Keras variable.
             None uses the dtype of x.

    # Returns
        A Keras variable with the shape of x filled with zeros.

    # Example
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.random.random((2,3)))
        >>> kvar_zeros = K.zeros_like(kvar)
        >>> K.eval(kvar_zeros)
        array([[ 0.,  0.,  0.],
               [ 0.,  0.,  0.]], dtype=float32)
"""
return tf.zeros_like(x, dtype=dtype, name=name)

```

Project: hart    Author: akosiorek    | Project source | File source
def iou(self, target_bbox, presence, per_timestep=False, reduce=True, start_t=1):

        pred_bbox, target_bbox, presence = [i[start_t:] for i in (self.pred_bbox, target_bbox, presence)]
        if not per_timestep:
            return _loss.intersection_over_union(pred_bbox, target_bbox, presence)
        else:
            iou = _loss.intersection_over_union(pred_bbox, target_bbox, reduce=False)
            iou = tf.where(presence, iou, tf.zeros_like(iou))
            iou = tf.reduce_sum(iou, (1, 2))
            p = tf.reduce_sum(tf.to_float(presence), (1, 2))
            if reduce:
                p = tf.maximum(p, tf.ones(tf.shape(presence)[0]))
                iou /= p
                return iou
            else:
                return iou, p
Project: mean-teacher    Author: CuriousAI    | Project source | File source
def classification_costs(logits, labels, name=None):
    """Compute classification cost mean and classification cost per sample

    Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
    Compute the mean over all examples.
    Note that unlabeled examples are treated differently in error calculation.
    """
    with tf.name_scope(name, "classification_costs") as scope:
        applicable = tf.not_equal(labels, -1)

        # Change -1s to zeros to make cross-entropy computable
        labels = tf.where(applicable, labels, tf.zeros_like(labels))

        # This will now have incorrect values for unlabeled examples
        per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)

        # Retain costs only for labeled
        per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))

        # Take mean over all examples, not just labeled examples.
        labeled_sum = tf.reduce_sum(per_sample)
        total_count = tf.to_float(tf.shape(per_sample)[0])
        mean = tf.div(labeled_sum, total_count, name=scope)

        return mean, per_sample
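
A small sketch of the semi-supervised masking pattern above (hypothetical shapes, assuming TensorFlow 1.x):

import tensorflow as tf

logits = tf.random_normal([4, 10])
labels = tf.constant([3, -1, 7, -1])  # two labeled examples, two unlabeled
mean_cost, per_sample = classification_costs(logits, labels)
# per_sample is zero at the -1 positions; mean_cost still divides by all 4 examples.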
Project: keras-customized    Author: ambrite    | Project source | File source
def zeros_like(x, name=None):
    '''Instantiates an all-zeros Keras variable
    of the same shape as another Keras variable or tensor and returns it.

    # Arguments
        x: Keras variable or Keras tensor.

    # Returns
        A Keras variable, filled with `0.0`.

    # Example
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.random.random((2,3)))
        >>> kvar_zeros = K.zeros_like(kvar)
        >>> K.eval(kvar_zeros)
        array([[ 0.,  0.,  0.],
               [ 0.,  0.,  0.]], dtype=float32)
    ```
    '''
    return tf.zeros_like(x, name=name)
Project: discoGAN.tensorflow.slim    Author: ilguyi    | Project source | File source
def GANLoss(logits, is_real=True, smoothing=0.9, name=None):
  """Computes standard GAN loss between `logits` and `labels`.

  Args:
    logits: logits
    is_real: boolean, True means `1` labeling, False means `0` labeling
    smoothing: one side label smoothing

  Returns:
    A scalar Tensor representing the loss value.
  """
  if is_real:
    # one side label smoothing
    labels = tf.fill(logits.get_shape(), smoothing)
  else:
    labels = tf.zeros_like(logits)

  with ops.name_scope(name, 'GAN_loss', [logits, labels]) as name:
    loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                                labels=labels,
                                logits=logits))

    return loss
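
A hedged sketch of wiring GANLoss into discriminator and generator objectives (d_real_logits and d_fake_logits are hypothetical discriminator outputs):

d_loss = GANLoss(d_real_logits, is_real=True) + GANLoss(d_fake_logits, is_real=False)
g_loss = GANLoss(d_fake_logits, is_real=True)  # the generator wants fakes scored as real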
Project: auDeep    Author: auDeep    | Project source | File source
def noisy_inputs(self) -> tf.Tensor:
        """
        Return the input sequence, with noise added according to the `input_noise` parameter.

        If the `input_noise` parameter is not set, this method simply returns the input sequence. Otherwise, it
        returns a tensor in which each time step of the input sequence is randomly set to zeros with probability
        given by the `input_noise` parameter.

        Returns
        -------
        tf.Tensor
            The input sequence, with noise added according to the `input_noise` parameter
        """
        if self.input_noise is None:
            return self.inputs

        # drop entire time steps with probability self.noise
        randoms = tf.random_uniform([self.max_step, self.batch_size], minval=0, maxval=1)
        randoms = tf.stack([randoms] * self.num_features, axis=2)

        result = tf.where(randoms > self.input_noise, self.inputs, tf.zeros_like(self.inputs))

        return result
Project: information-dropout    Author: ucla-vision    | Project source | File source
def information_pool(self, inputs, max_alpha, alpha_mode, lognorm_prior, num_outputs=None, stride=2, scope=None):
        if num_outputs is None:
            num_outputs = inputs.get_shape()[-1]
        # Creates the output convolutional layer
        network = self.conv(inputs, num_outputs=int(num_outputs), stride=stride)
        with tf.variable_scope(scope,'information_dropout'):
            # Computes the noise parameter alpha for the output
            alpha = conv2d(inputs, num_outputs=int(num_outputs), kernel_size=3,
                stride=stride, activation_fn=tf.sigmoid, scope='alpha')
            # Rescale alpha in the allowed range and add a small value for numerical stability
            alpha = 0.001 + max_alpha * alpha
            # Computes the KL divergence using either log-uniform or log-normal prior
            if not lognorm_prior:
                kl = - tf.log(alpha/(max_alpha + 0.001))
            else:
                mu1 = tf.get_variable('mu1', [], initializer=tf.constant_initializer(0.))
                sigma1 = tf.get_variable('sigma1', [], initializer=tf.constant_initializer(1.))
                kl = KL_div2(tf.log(tf.maximum(network,1e-4)), alpha, mu1, sigma1)
            tf.add_to_collection('kl_terms', kl)
        # Samples the noise with the given parameter
        e = sample_lognormal(mean=tf.zeros_like(network), sigma = alpha, sigma0 = self.sigma0)
        # Returns the noisy output of the dropout
        return network * e
Project: information-dropout    Author: ucla-vision    | Project source | File source
def information_pool(self, inputs, max_alpha, alpha_mode, lognorm_prior, num_outputs=None, stride=2, scope=None):
        if num_outputs is None:
            num_outputs = inputs.get_shape()[-1]
        # Creates the output convolutional layer
        network = self.conv(inputs, num_outputs=int(num_outputs), stride=stride)
        with tf.variable_scope(scope,'information_dropout'):
            # Computes the noise parameter alpha for the output
            alpha = conv2d(inputs, num_outputs=int(num_outputs), kernel_size=3,
                stride=stride, activation_fn=tf.sigmoid, scope='alpha')
            # Rescale alpha in the allowed range and add a small value for numerical stability
            alpha = 0.001 + max_alpha * alpha
            # Computes the KL divergence using either log-uniform or log-normal prior
            if not lognorm_prior:
                kl = - tf.log(alpha/(max_alpha + 0.001))
            else:
                mu1 = tf.get_variable('mu1', [], initializer=tf.constant_initializer(0.))
                sigma1 = tf.get_variable('sigma1', [], initializer=tf.constant_initializer(1.))
                kl = KL_div2(tf.log(tf.maximum(network,1e-4)), alpha, mu1, sigma1)
            tf.add_to_collection('kl_terms', kl)
        # Samples the noise with the given parameter
        e = sample_lognormal(mean=tf.zeros_like(network), sigma = alpha, sigma0 = self.sigma0)
        # Saves the log-output of the network (useful to compute the total correlation)
        tf.add_to_collection('log_network', tf.log(network * e))
        # Returns the noisy output of the dropout
        return network * e
Project: AtNRE    Author: jxwuyi    | Project source | File source
def get_rnn_init_state(x, cell):
    """
    x: [batch, dim], must match the dim of the cell
    """
    if isinstance(cell, tf.contrib.rnn.MultiRNNCell):
        batch = x.get_shape()[0]
        z = list(cell.zero_state(batch, dtype=tf.float32))
        if isinstance(z[0], tuple):
            z[0] = (tf.zeros_like(x), x)
        else:
            z[0] = x
        return tuple(z)
    if isinstance(cell.state_size, tuple):
        #lstm cell
        assert(len(cell.state_size) == 2)
        return (tf.zeros_like(x), x)
    # assume GRU Cell
    return x
Project: openai-rl    Author: morgangiraud    | Project source | File source
def eligibility_traces(Qs_t, states_t, actions_t, discount, lambda_value):
    et = tf.get_variable(
        "eligibilitytraces"
        , shape=Qs_t.get_shape()
        , dtype=tf.float32
        , trainable=False
        , initializer=tf.zeros_initializer()
    )
    tf.summary.histogram('eligibilitytraces', et)
    dec_et_op = tf.assign(et, discount * lambda_value * et)
    with tf.control_dependencies([dec_et_op]):
        state_action_pairs = tf.stack([states_t, actions_t], 1)
        update_et_op = tf.scatter_nd_update(et, indices=state_action_pairs, updates=tf.ones_like(states_t, dtype=tf.float32))

    reset_et_op = et.assign(tf.zeros_like(et, dtype=tf.float32))

    return (et, update_et_op, reset_et_op)
Project: openai-rl    Author: morgangiraud    | Project source | File source
def eligibility_dutch_traces(Qs_t, states_t, actions_t, lr, discount, lambda_value):
    # Beware this trace has to be used with a different learning rule
    et = tf.get_variable(
        "eligibilitytraces"
        , shape=Qs_t.get_shape()
        , dtype=tf.float32
        , trainable=False
        , initializer=tf.zeros_initializer()
    )
    tf.summary.histogram('eligibilitytraces', et)
    state_action_pairs = tf.stack([states_t, actions_t], 1)
    current_trace = tf.gather_nd(et, state_action_pairs)
    updates = 1 - lr * discount * lambda_value * current_trace
    with tf.control_dependencies([updates]):
        dec_et_op = tf.assign(et, discount * lambda_value * et)
        with tf.control_dependencies([dec_et_op]):
            update_et_op = tf.scatter_nd_add(et, indices=state_action_pairs, updates=updates)

    reset_et_op = et.assign(tf.zeros_like(et, dtype=tf.float32))

    return (et, update_et_op, reset_et_op)
Project: TLDR    Author: zihualiu    | Project source | File source
def __call__(self, inputs, state, scope=None):
    """Run this multi-layer cell on inputs, starting from state."""
    with tf.variable_scope("MultiRNNCellWithConn"):
      cur_state_pos = 0
      first_layer_input = cur_inp = inputs
      new_states = []
      for i, cell in enumerate(self._cells):
        with tf.variable_scope("Cell%d" % i):
          cur_state = tf.slice(
              state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state_pos += cell.state_size
          # Add skip connection from the input of current time t.
          if i != 0:
            first_layer_input = first_layer_input
          else:
            first_layer_input = tf.zeros_like(first_layer_input)
          cur_inp, new_state = cell(tf.concat(1, [inputs, first_layer_input]), cur_state)
          new_states.append(new_state)
    return cur_inp, tf.concat(1, new_states)
Project: GalaxyGAN_python    Author: Ireneruru    | Project source | File source
def __init__(self):
        self.image = tf.placeholder(tf.float32, shape=(1,conf.train_size, conf.train_size, conf.img_channel))
        self.cond = tf.placeholder(tf.float32, shape=(1,conf.train_size, conf.train_size, conf.img_channel))

        self.gen_img = self.generator(self.cond)

        pos = self.discriminator(self.image, self.cond, False)
        neg = self.discriminator(self.gen_img, self.cond, True)
        pos_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pos, labels=tf.ones_like(pos)))
        neg_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.zeros_like(neg)))

        self.delta = tf.square(tf.reduce_mean(self.image)-(tf.reduce_mean(self.gen_img)))

        self.d_loss = pos_loss + neg_loss

        #with regularization
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.ones_like(neg))) + \
                  conf.L1_lambda * tf.reduce_mean(tf.abs(self.image - self.gen_img)) + conf.sum_lambda *self.delta

        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'disc' in var.name]
        self.g_vars = [var for var in t_vars if 'gen' in var.name]
Project: TerpreT    Author: 51alg    | Project source | File source
def make_test_node(self, hypers_name):
        outputs = self.tf_nodes[hypers_name]["outputs"]

        deltas = []
        for var_name, output_node in outputs.iteritems():
            data_node = self.tf_nodes[hypers_name]["placeholders"][var_name]
            output_rank = output_node.get_shape().ndims
            if output_rank == 1:
                output_node = tf.tile(tf.expand_dims(output_node, 0), [tf.shape(data_node)[0], 1])
            deltas.append(
                tf.to_int32(tf.argmax(output_node, dimension=1)) - data_node)

        zero_if_correct = tf.reduce_sum(tf.pack(deltas), reduction_indices=0)
        zero_elements = tf.equal(zero_if_correct, tf.zeros_like(zero_if_correct))
        n_correct = tf.reduce_sum(tf.to_int32(zero_elements))
        n_total = tf.shape(zero_if_correct)[0]
        accuracy = tf.truediv(n_correct, n_total)
        self.summary_nodes["test"] = tf.scalar_summary('test_accuracy', accuracy)
        self.tf_nodes[hypers_name]["accuracy"] = accuracy
Project: deepsleepnet    Author: akaraspt    | Project source | File source
def retrieve_seq_length_op2(data):
    """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)],
    it can be used when the features of padding (on right hand side) are all zeros.

    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max)] with zero padding on right hand side.

    Examples
    --------
    >>> data = [[1,2,0,0,0],
    ...         [1,2,3,0,0],
    ...         [1,2,6,1,0]]
    >>> o = retrieve_seq_length_op2(data)
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> print(o.eval())
    ... [2 3 4]
    """
    return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1)
Project: dreamscape    Author: themattinthehatt    | Project source | File source
def _define_loss(self):
        """Define loss function that will be used to optimize model params"""

        # define generator loss
        with tf.variable_scope('generator'):
            self.loss_gen = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_gen,
                    labels=tf.ones_like(self.disc_gen)))

        # define discriminator loss
        with tf.variable_scope('discriminator'):
            self.loss_disc = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_real,
                    labels=tf.ones_like(self.disc_real)) +
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_gen,
                    labels=tf.zeros_like(self.disc_gen)))

        # save summaries of losses
        tf.summary.scalar('loss_gen', self.loss_gen)
        tf.summary.scalar('loss_disc', self.loss_disc)
Project: GAN    Author: kunrenzhilu    | Project source | File source
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
        discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                           name="disc_real_loss")

        discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                           name="disc_fake_loss")
        self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

        gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
        if use_features:
            gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
        else:
            gen_loss_features = 0
        self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

        tf.summary.scalar("Discriminator_loss", self.discriminator_loss)
        tf.summary.scalar("Generator_loss", self.gen_loss)
Project: youtube-8m    Author: wangheda    | Project source | File source
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: youtube-8m    Author: wangheda    | Project source | File source
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: deep-learning    Author: ljanyst    | Project source | File source
def get_optimizers(self, learning_rate=0.002, smooth=0.1):
        #-----------------------------------------------------------------------
        # Define loss functions
        #-----------------------------------------------------------------------
        with tf.variable_scope('losses'):
            dsc_real_loss = tf.reduce_mean(
              tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.dsc_real_logits,
                labels=tf.ones_like(self.dsc_real_logits) * (1 - smooth)))

            dsc_fake_loss = tf.reduce_mean(
              tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.dsc_fake_logits,
                labels=tf.zeros_like(self.dsc_fake_logits)))

            dsc_loss = (dsc_real_loss + dsc_fake_loss)/2

            gen_loss = tf.reduce_mean(
              tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.dsc_fake_logits,
                labels=tf.ones_like(self.dsc_fake_logits)))

        #-----------------------------------------------------------------------
        # Optimizers
        #-----------------------------------------------------------------------
        trainable_vars = tf.trainable_variables()
        gen_vars = [var for var in trainable_vars \
                      if var.name.startswith('generator')]
        dsc_vars = [var for var in trainable_vars \
                      if var.name.startswith('discriminator')]

        with tf.variable_scope('optimizers'):
            with tf.variable_scope('discriminator_optimizer'):
                dsc_train_opt = tf.train.AdamOptimizer(learning_rate) \
                  .minimize(dsc_loss, var_list=dsc_vars)
            with tf.variable_scope('generator_optimizer'):
                gen_train_opt = tf.train.AdamOptimizer(learning_rate) \
                  .minimize(gen_loss, var_list=gen_vars)

        return dsc_train_opt, gen_train_opt, dsc_loss, gen_loss
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def loss(self, x, y):
        with tf.name_scope('loss'):
            z_mu, z_lv = self._encode(x)
            z = GaussianSampleLayer(z_mu, z_lv)
            xh = self._generate(z, y)

            D_KL = tf.reduce_mean(
                GaussianKLD(
                    slim.flatten(z_mu),
                    slim.flatten(z_lv),
                    slim.flatten(tf.zeros_like(z_mu)),
                    slim.flatten(tf.zeros_like(z_lv)),
                )
            )
            logPx = tf.reduce_mean(
                GaussianLogDensity(
                    slim.flatten(x),
                    slim.flatten(xh),
                    tf.zeros_like(slim.flatten(xh))),
            )

        loss = dict()
        loss['G'] = - logPx + D_KL
        loss['D_KL'] = D_KL
        loss['logP'] = logPx

        tf.summary.scalar('KL-div', D_KL)
        tf.summary.scalar('logPx', logPx)

        tf.summary.histogram('xh', xh)
        tf.summary.histogram('x', x)
        return loss
Project: vae-npvc    Author: JeremyCCHsu    | Project source | File source
def loss(self, x, y):
        '''
        Args:
            x: shape=[s, b, c]
            y: shape=[s, b]
        Returns:
            a `dict` of losses
        '''
        z_mu, z_lv = self._encode(x, is_training=self.is_training)
        z = GaussianSampleLayer(z_mu, z_lv)
        xh = self._decode(z, y, is_training=self.is_training)

        with tf.name_scope('loss'):
            with tf.name_scope('E_log_p_x_zy'):
                L_x = -1.0 * tf.reduce_mean(
                    GaussianLogDensity(x, xh, tf.zeros_like(x)),
                )
            with tf.name_scope('D_KL_z'):
                L_z = tf.reduce_mean(
                    GaussianKLD(
                        z_mu, z_lv,
                        tf.zeros_like(z_mu), tf.zeros_like(z_lv)
                    )
                )
            loss = {
                'L_x': L_x,
                'L_z': L_z,
            }

        tf.summary.scalar('L_x', L_x)
        tf.summary.scalar('L_z', L_z)
        return loss
Project: segmentation_DLMI    Author: imatge-upc    | Project source | File source
def dice_whole_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1, 2, 3, 4, 0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    # mask = K.expand_dims(K.sum(y_true,axis=4),axis=4)
    # cmp_mask = K.concatenate([K.ones_like(mask) - mask,K.zeros_like(mask), K.zeros_like(mask)],axis=4)
    # y_pred = y_pred + cmp_mask

    y_true = y_true[:,:,:,:,:3]
    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = K.sum(y_true, axis=4)
    mask_pred = K.sum(y_pred_decision, axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc    | Project source | File source
def dice_core_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1, 2, 3, 4, 5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true = y_true[:,:,:,:,:3]


    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    y_pred_decision = tf.where(tf.is_nan(y_pred_decision), tf.zeros_like(y_pred_decision), y_pred_decision)


    mask_true1 = K.expand_dims(y_true[:, :, :, :, 2],axis=4)
    mask_true2 = K.expand_dims(y_true[:, :, :, :, 0],axis=4)
    mask_true = K.sum(K.concatenate([mask_true1, mask_true2], axis=4), axis=4)
    mask_pred1 = K.expand_dims(y_pred_decision[:, :, :, :, 2],axis=4)
    mask_pred2 = K.expand_dims(y_pred_decision[:, :, :, :, 0],axis=4)
    mask_pred = K.sum(K.concatenate([mask_pred1, mask_pred2], axis=4), axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: segmentation_DLMI    Author: imatge-upc    | Project source | File source
def dice_enhance_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P comes from classes 1, 2, 3, 4, 5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction

    Returns
    -------
    scalar
        Dice metric
    """
    y_true = y_true[:,:,:,:,:3]

    y_pred_decision = tf.floor((y_pred  + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    # y_pred_decision = tf.where(tf.is_nan(y_pred_decision), tf.zeros_like(y_pred_decision), y_pred_decision)



    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2] * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
Project: tfutils    Author: neuroailab    | Project source | File source
def accumulate_gradients(self, minibatch_grads, num_minibatches=1):
        """Accumulate gradients for `num_minibatches` minibatches."""
        if self.var_list is None:
            self.var_list = tf.trainable_variables()

        if self.grads_and_vars is None:
            self.grads_and_vars = [(
                tf.Variable(tf.zeros_like(var.initialized_value()),
                            dtype=tf.float32,
                            trainable=False),
                var) for var in self.var_list]

        # Add 1/num_minibatches * minibatch_grads to current gradients.
        def _add_op(gv_tmp, mgv_tmp):
            return tf.add(gv_tmp, tf.divide(mgv_tmp, num_minibatches))
        def _set_op(gv_tmp, mgv_tmp):
            return tf.assign(gv_tmp, tf.divide(mgv_tmp, num_minibatches))
        #grads = [(gv[0].assign_add(tf.divide(mgv[0], num_minibatches)), gv[1])
        #         for (gv, mgv) in zip(self.grads_and_vars, minibatch_grads)]
        #grads = tf.cond(tf.less(self.mini_flag[0], 0.5), fn1 = lambda: _add_op(), fn2 = lambda: _set_op())
        grads = [tf.cond(tf.less(self.mini_flag[0], 0.5), fn1 = lambda: _set_op(gv[0], mgv[0]), fn2 = lambda: _add_op(gv[0], mgv[0]))
                 for (gv, mgv) in zip(self.grads_and_vars, minibatch_grads)]
        with tf.control_dependencies(grads):
            self.mini_flag = tf.assign(self.mini_flag, tf.constant([1], dtype = tf.float32))
        grads = [(only_grad, gv[1])
                 for (gv, only_grad) in zip(self.grads_and_vars, grads)]
        return self.mini_flag, grads
Project: tfutils    Author: neuroailab    | Project source | File source
def zero_grad(self):
        if self.grads_and_vars is None:
            self.grads_and_vars = [(
                tf.Variable(tf.zeros_like(var), dtype=tf.float32, trainable=False),
                var) for var in self.var_list]
        return [tf.assign(gv[0], tf.zeros_like(gv[0]))
                for gv in self.grads_and_vars]
Project: how_to_convert_text_to_images    Author: llSourcell    | Project source | File source
def compute_losses(self, images, wrong_images, fake_images, embeddings):
        real_logit = self.model.get_discriminator(images, embeddings)
        wrong_logit = self.model.get_discriminator(wrong_images, embeddings)
        fake_logit = self.model.get_discriminator(fake_images, embeddings)

        real_d_loss =\
            tf.nn.sigmoid_cross_entropy_with_logits(real_logit,
                                                    tf.ones_like(real_logit))
        real_d_loss = tf.reduce_mean(real_d_loss)
        wrong_d_loss =\
            tf.nn.sigmoid_cross_entropy_with_logits(wrong_logit,
                                                    tf.zeros_like(wrong_logit))
        wrong_d_loss = tf.reduce_mean(wrong_d_loss)
        fake_d_loss =\
            tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                    tf.zeros_like(fake_logit))
        fake_d_loss = tf.reduce_mean(fake_d_loss)
        if cfg.TRAIN.B_WRONG:
            discriminator_loss =\
                real_d_loss + (wrong_d_loss + fake_d_loss) / 2.
            self.log_vars.append(("d_loss_wrong", wrong_d_loss))
        else:
            discriminator_loss = real_d_loss + fake_d_loss
        self.log_vars.append(("d_loss_real", real_d_loss))
        self.log_vars.append(("d_loss_fake", fake_d_loss))

        generator_loss = \
            tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                    tf.ones_like(fake_logit))
        generator_loss = tf.reduce_mean(generator_loss)

        return discriminator_loss, generator_loss
Project: EWC    Author: stokesj    | Project source | File source
def create_fisher_ops(self):
        self.fisher_diagonal = self.bias_shaped_variables(name='bias_grads2', c=0.0, trainable=False) +\
                               self.weight_shaped_variables(name='weight_grads2', c=0.0, trainable=False)

        self.fisher_accumulate_op = [tf.assign_add(f1, f2) for f1, f2 in zip(self.fisher_diagonal, self.fisher_minibatch)]
        scale = 1 / float(self.ewc_batches * self.ewc_batch_size)
        self.fisher_full_batch_average_op = [tf.assign(var, scale * var) for var in self.fisher_diagonal]
        self.fisher_zero_op = [tf.assign(tensor, tf.zeros_like(tensor)) for tensor in self.fisher_diagonal]
Project: yt8m    Author: forwchen    | Project source | File source
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: fold    Author: tensorflow    | Project source | File source
def _instantiate_subnet(self, batch, block_idx, seq_prefix):
    def zeros_fn():
      return tf.zeros_like(batch)
    def base_case_fn():
      return self._children[block_idx, seq_prefix](batch)
    def recursive_case_fn():
      first_subnet = self._instantiate_subnet(
          batch, block_idx, seq_prefix + (0,))
      return self._instantiate_subnet(
          first_subnet, block_idx, seq_prefix + (1,))
    if len(seq_prefix) == self._fractal_block_depth:
      return base_case_fn()
    else:
      choice = self._drop_path_choices[self._choice_id[(block_idx, seq_prefix)]]
      base_case = tf.cond(
          tf.not_equal(choice, self._JUST_RECURSE), base_case_fn, zeros_fn)
      base_case.set_shape(batch.get_shape())
      recursive_case = tf.cond(
          tf.not_equal(choice, self._JUST_BASE), recursive_case_fn, zeros_fn)
      recursive_case.set_shape(batch.get_shape())
      cases = [
          (tf.equal(choice, self._BOTH),
           lambda: self._mixer(base_case, recursive_case)),
          (tf.equal(choice, self._JUST_BASE), lambda: base_case),
          (tf.equal(choice, self._JUST_RECURSE), lambda: recursive_case)]
      result = tf.case(cases, lambda: base_case)
      result.set_shape(batch.get_shape())
      return result
Project: tensorflow-srgan    Author: olgaliak    | Project source | File source
def create_discriminator_loss(disc_real_output, disc_fake_output):
    # I.e. did we correctly identify the input as real or not?
    cross_entropy_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real_output, labels=tf.ones_like(disc_real_output))
    disc_real_loss     = tf.reduce_mean(cross_entropy_real, name='disc_real_loss')

    cross_entropy_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake_output, labels=tf.zeros_like(disc_fake_output))
    disc_fake_loss     = tf.reduce_mean(cross_entropy_fake, name='disc_fake_loss')

    return disc_real_loss, disc_fake_loss
Project: keras    Author: GeekLiB    | Project source | File source
def zeros_like(x, name=None):
    '''Instantiates an all-zeros tensor
    of the same shape as another tensor.
    '''
    return tf.zeros_like(x, name=name)
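
For reference, a standalone demonstration of tf.zeros_like itself (a minimal sketch, assuming TensorFlow 1.x graph mode):

import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
z = tf.zeros_like(x)                      # same shape and dtype, all zeros
z_int = tf.zeros_like(x, dtype=tf.int32)  # same shape, overridden dtype

with tf.Session() as sess:
    print(sess.run(z))  # [[0. 0. 0.]
                        #  [0. 0. 0.]]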