Python tensorflow module: ones_like() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.ones_like().
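
Before the project snippets, here is a minimal, self-contained sketch of the call itself (a hand-written illustration, assuming the TF 1.x graph-mode API used throughout the examples below): tf.ones_like builds a tensor of ones with the same shape as its input, optionally overriding the dtype.

import tensorflow as tf

labels = tf.placeholder(tf.int32, shape=(None,), name="labels")
# Same shape as `labels`, but float32 - the common "uniform sequence-loss weights" pattern.
weights = tf.ones_like(labels, dtype=tf.float32)

with tf.Session() as sess:
    print(sess.run(weights, feed_dict={labels: [4, 7, 1]}))  # [1. 1. 1.]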

Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,),
                                           name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,),
                                          name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                            + self.labels[:-1])
Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: Face-Pose-Net    Author: fengju514
def _meshgrid(self, height, width):
    with tf.variable_scope('_meshgrid'):
      # This should be equivalent to:
      #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
      #                         np.linspace(-1, 1, height))
      #  ones = np.ones(np.prod(x_t.shape))
      #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
      x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
      y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.pack([1, width])))

      x_t_flat = tf.reshape(x_t, (1, -1))
      y_t_flat = tf.reshape(y_t, (1, -1))

      ones = tf.ones_like(x_t_flat)
      grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
      return grid
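
The comment inside _meshgrid claims equivalence with a NumPy meshgrid; as a rough, illustrative check (a hypothetical helper, not part of the Face-Pose-Net code), the same homogeneous grid can be built directly in NumPy:

import numpy as np

def meshgrid_np(height, width):
    # Build the same (3, height*width) homogeneous sampling grid as the TF graph above.
    x_t, y_t = np.meshgrid(np.linspace(-1.0, 1.0, width), np.linspace(-1.0, 1.0, height))
    ones = np.ones(np.prod(x_t.shape))
    return np.vstack([x_t.flatten(), y_t.flatten(), ones])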
Project: unsupervised-2017-cvprw    Author: imatge-upc
def generate_mask(img_mask_list, h, w, l):
    img_masks, loss_masks = [], []

    for i in range(l):
        # generate image mask
        img_mask = img_mask_list[i]
        img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
        img_mask = tf.reshape(img_mask, (h, w))
        img_masks.append(img_mask)

        # generate loss mask
        s_total   = h * w
        s_mask    = tf.reduce_sum(img_mask)
        def f1(): return img_mask*((s_total-s_mask)/s_mask-1)+1
        def f2(): return tf.zeros_like(img_mask)
        def f3(): return tf.ones_like(img_mask)
        loss_mask = tf.case([(tf.equal(s_mask, 0), f2), \
                             (tf.less(s_mask, s_total/2), f1)],
                             default=f3)

        loss_masks.append(loss_mask)

    return tf.stack(img_masks), tf.stack(loss_masks)
Project: tensorflow-srgan    Author: olgaliak
def create_generator_loss(disc_output, gene_output, features):
    # I.e. did we fool the discriminator?
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_output, labels=tf.ones_like(disc_output))
    gene_ce_loss  = tf.reduce_mean(cross_entropy, name='gene_ce_loss')

    # I.e. does the result look like the feature?
    K = int(gene_output.get_shape()[1])//int(features.get_shape()[1])
    assert K == 2 or K == 4 or K == 8    
    downscaled = _downscale(gene_output, K)

    gene_l1_loss  = tf.reduce_mean(tf.abs(downscaled - features), name='gene_l1_loss')

    gene_loss     = tf.add((1.0 - FLAGS.gene_l1_factor) * gene_ce_loss,
                           FLAGS.gene_l1_factor * gene_l1_loss, name='gene_loss')

    return gene_loss
Project: WassersteinGAN.tensorflow    Author: shekkizh
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
        discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                           name="disc_real_loss")

        discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                           name="disc_fake_loss")
        self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

        gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
        if use_features:
            gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
        else:
            gen_loss_features = 0
        self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

        tf.scalar_summary("Discriminator_loss", self.discriminator_loss)
        tf.scalar_summary("Generator_loss", self.gen_loss)
Project: tflearn_seq2seq    Author: ichuang
def sequence_loss(self, y_pred, y_true):
        '''
        Loss function for the seq2seq RNN.  Reshape predicted and true (label) tensors, generate dummy weights,
        then use seq2seq.sequence_loss to actually compute the loss function.
        '''
        if self.verbose > 2: print ("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true))
        logits = tf.unpack(y_pred, axis=1)      # list of [-1, num_decoder_symbols] elements
        targets = tf.unpack(y_true, axis=1)     # y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements
        if self.verbose > 2:
            print ("my_sequence_loss logits=%s" % (logits,))
            print ("my_sequence_loss targets=%s" % (targets,))
        weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets]
        if self.verbose > 4: print ("my_sequence_loss weights=%s" % (weights,))
        sl = seq2seq.sequence_loss(logits, targets, weights)
        if self.verbose > 2: print ("my_sequence_loss return = %s" % sl)
        return sl
Project: HyperGAN    Author: 255BITS
def _create(self, d_real, d_fake):
        ops = self.ops
        config = self.config
        gan = self.gan

        generator_target_probability = config.generator_target_probability or 0.8
        label_smooth = config.label_smooth or 0.2

        zeros = tf.zeros_like(d_fake)
        ones = tf.ones_like(d_fake)
        if config.improved:
            g_loss = self.sigmoid_kl_with_logits(d_fake, generator_target_probability)
            d_loss = self.sigmoid_kl_with_logits(d_real, 1.-label_smooth) + \
                    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros)
        else:
            g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros)
            d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=zeros) + \
                     tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=ones)

        return [d_loss, g_loss]
Project: GAN-Sentence    Author: huseinzol05
def __init__(self, num_layers, size_layer, dimension_input, len_noise, sequence_size, learning_rate):
        self.noise = tf.placeholder(tf.float32, [None, None, len_noise])
        self.fake_input = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.true_sentence = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.initial_layer = generator_encode(self.noise, num_layers, size_layer, len_noise)
        self.final_outputs = generator_sentence(self.fake_input, self.initial_layer, num_layers, size_layer, dimension_input)
        fake_logits = discriminator(self.final_outputs, num_layers, size_layer, dimension_input)
        true_logits = discriminator(self.true_sentence, num_layers, size_layer, dimension_input, reuse = True)
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = true_logits, labels = tf.ones_like(true_logits)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.zeros_like(fake_logits)))
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.ones_like(fake_logits)))

        self.d_loss = d_loss_real + d_loss_fake
        d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'discriminator')
        g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_encode') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_sentence')
        self.d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1 = 0.5).minimize(self.d_loss, var_list = d_vars)
        self.g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1 = 0.5).minimize(self.g_loss, var_list = g_vars)
Project: zhusuan    Author: thu-ml
def _sample(self, n_samples):
        mean, cov_tril = self.mean, self.cov_tril
        if not self.is_reparameterized:
            mean = tf.stop_gradient(mean)
            cov_tril = tf.stop_gradient(cov_tril)

        def tile(t):
            new_shape = tf.concat([[n_samples], tf.ones_like(tf.shape(t))], 0)
            return tf.tile(tf.expand_dims(t, 0), new_shape)

        batch_mean = tile(mean)
        batch_cov = tile(cov_tril)
        # n_dim -> n_dim x 1 for matmul
        batch_mean = tf.expand_dims(batch_mean, -1)
        noise = tf.random_normal(tf.shape(batch_mean), dtype=self.dtype)
        samples = tf.matmul(batch_cov, noise) + batch_mean
        samples = tf.squeeze(samples, -1)
        # Update static shape
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(tf.TensorShape([static_n_samples])
                          .concatenate(self.get_batch_shape())
                          .concatenate(self.get_value_shape()))
        return samples
Project: py-noisemaker    Author: aayars
def density_map(tensor, shape):
    """
    """

    height, width, channels = shape

    bins = max(height, width)

    # values = value_map(tensor, shape, keep_dims=True)
    # values = tf.minimum(tf.maximum(tensor, 0.0), 1.0)  # TODO: Get this to work with HDR data
    values = tensor

    # https://stackoverflow.com/a/34143927
    binned_values = tf.cast(tf.reshape(values * (bins - 1), [-1]), tf.int32)
    ones = tf.ones_like(binned_values, dtype=tf.int32)
    counts = tf.unsorted_segment_sum(ones, binned_values, bins)

    out = tf.gather(counts, tf.cast(values[:, :] * (bins - 1), tf.int32))

    return tf.ones(shape) * normalize(tf.cast(out, tf.float32))
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0,tf.float32)
    misclassnum = tf.constant(0,tf.float32)
    class_num   = tf.constant(class_nums,tf.float32) 
    sublist   =  []
    for index in range(1,class_nums-2):
        current_annotation   =     tf.cast(tf.equal(tf.ones_like(annotations)*index,\
                                        annotations),tf.float32)
        cureent_prediction   =     tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
                                            decoded_predictions),tf.float32)
        Overlap              =     tf.add(current_annotation,cureent_prediction)
        Common               =     tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2,Overlap),\
                                                            tf.float32),[0,1,2,3])
        annotation_num       =     tf.reduce_sum(current_annotation,[0,1,2,3])
        predict_num          =     tf.reduce_sum(cureent_prediction,[0,1,2,3])
        all_num              =     tf.add(annotation_num,predict_num)
        Sub_DiceRatio        =     Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum          =     tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio            =     DiceRatio + Sub_DiceRatio
    DiceRatio                =     DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
    return DiceRatio, sublist
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0,tf.float32)
    misclassnum = tf.constant(0,tf.float32)
    class_num   = tf.constant(class_nums,tf.float32) 
    sublist   =  []
    for index in range(1,class_nums-2):
        current_annotation   =     tf.cast(tf.equal(tf.ones_like(annotations)*index,\
                                        annotations),tf.float32)
        cureent_prediction   =     tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
                                            decoded_predictions),tf.float32)
        Overlap              =     tf.add(current_annotation,cureent_prediction)
        Common               =     tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2,Overlap),\
                                                            tf.float32),[0,1,2,3])
        annotation_num       =     tf.reduce_sum(current_annotation,[0,1,2,3])
        predict_num          =     tf.reduce_sum(cureent_prediction,[0,1,2,3])
        all_num              =     tf.add(annotation_num,predict_num)
        Sub_DiceRatio        =     0
        Sub_DiceRatio        =     Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum          =     tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio            =     DiceRatio + Sub_DiceRatio
        del Sub_DiceRatio
    DiceRatio                =     DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
    return DiceRatio, sublist
Project: tflearn    Author: tflearn
def sequence_loss(self, y_pred, y_true):
        '''
        Loss function for the seq2seq RNN.  Reshape predicted and true (label) tensors, generate dummy weights,
        then use seq2seq.sequence_loss to actually compute the loss function.
        '''
        if self.verbose > 2: print ("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true))
        logits = tf.unstack(y_pred, axis=1)     # list of [-1, num_decoder_symbols] elements
        targets = tf.unstack(y_true, axis=1)        # y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements
        if self.verbose > 2:
            print ("my_sequence_loss logits=%s" % (logits,))
            print ("my_sequence_loss targets=%s" % (targets,))
        weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets]
        if self.verbose > 4: print ("my_sequence_loss weights=%s" % (weights,))
        sl = seq2seq.sequence_loss(logits, targets, weights)
        if self.verbose > 2: print ("my_sequence_loss return = %s" % sl)
        return sl
Project: transform    Author: tensorflow
def size(x, reduce_instance_dims=True, name=None):
  """Computes the total size of instances in a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor`.
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with tf.name_scope(name, 'size'):
    # Note: Calling `sum` defined in this module, not the builtin.
    return sum(tf.ones_like(x), reduce_instance_dims)
Project: transform    Author: tensorflow
def segment_indices(segment_ids, name=None):
  """Returns a `Tensor` of indices within each segment.

  segment_ids should be a sequence of non-decreasing non-negative integers that
  define a set of segments, e.g. [0, 0, 1, 2, 2, 2] defines 3 segments of length
  2, 1 and 3.  The return value is a `Tensor` containing the indices within each
  segment.

  Example input: [0, 0, 1, 2, 2, 2]
  Example output: [0, 1, 0, 0, 1, 2]

  Args:
    segment_ids: A 1-d `Tensor` containing a non-decreasing sequence of
        non-negative integers with type `tf.int32` or `tf.int64`.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the indices within each segment.
  """
  with tf.name_scope(name, 'segment_indices'):
    segment_lengths = tf.segment_sum(tf.ones_like(segment_ids), segment_ids)
    segment_starts = tf.gather(tf.concat([[0], tf.cumsum(segment_lengths)], 0),
                               segment_ids)
    return (tf.range(tf.size(segment_ids, out_type=segment_ids.dtype)) -
            segment_starts)
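
A rough usage sketch of the docstring example above (illustrative only, assuming the same graph-mode TF version; segment_indices is the function just defined):

import tensorflow as tf

segment_ids = tf.constant([0, 0, 1, 2, 2, 2], dtype=tf.int64)
indices = segment_indices(segment_ids)
with tf.Session() as sess:
    print(sess.run(indices))  # expected: [0 1 0 0 1 2]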
Project: Deep-Image-Matting    Author: Joker316701882
def unpool(pool, ind, ksize=[1, 2, 2, 1], scope='unpool'):

    with tf.variable_scope(scope):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])

        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]

        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)

        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
Project: bgsCNN    Author: SaoYan
def unpool(pool, ind, shape, ksize=[1, 2, 2, 1], scope=None):
    with tf.name_scope(scope):
        input_shape =  tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.cumprod(input_shape)[-1]
        flat_output_shape = tf.stack([output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]])
        pool_ = tf.reshape(pool, tf.stack([flat_input_size]))
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                shape=tf.stack([input_shape[0], 1, 1, 1]))
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, tf.stack([flat_input_size, 1]))
        ind_ = tf.reshape(ind, tf.stack([flat_input_size, 1]))
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, tf.stack(output_shape))
        ret = tf.reshape(ret, shape=shape)
        return ret
Project: DaNet-Tensorflow    Author: khaotik
def __call__(self, s_embed, s_src_pwr, s_mix_pwr, s_embed_flat=None):
        if s_embed_flat is None:
            s_embed_flat = tf.reshape(
                s_embed,
                [hparams.BATCH_SIZE, -1, hparams.EMBED_SIZE])
        with tf.variable_scope(self.name):
            s_src_assignment = tf.argmax(s_src_pwr, axis=1)
            s_indices = tf.reshape(
                s_src_assignment,
                [hparams.BATCH_SIZE, -1])
            fn_segmean = lambda _: tf.unsorted_segment_sum(
                _[0], _[1], hparams.MAX_N_SIGNAL)
            s_attractors = tf.map_fn(
                fn_segmean, (s_embed_flat, s_indices), hparams.FLOATX)
            s_attractors_wgt = tf.map_fn(
                fn_segmean, (tf.ones_like(s_embed_flat), s_indices),
                hparams.FLOATX)
            s_attractors /= (s_attractors_wgt + 1.)

        if hparams.DEBUG:
            self.debug_fetches = dict()
        # float[B, C, E]
        return s_attractors
Project: deep-learning-keras-projects    Author: jasmeetsb
def ones_like(x, dtype=None, name=None):
    """Instantiates an all-ones Keras variable
    of the same shape as another Keras variable or tensor and returns it.

    # Arguments
        x: Keras variable or tensor.
        dtype: String, dtype of returned Keras variable.
             None uses the dtype of x.

    # Returns
        A Keras variable with the shape of x filled with ones.

    # Example
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.random.random((2,3)))
        >>> kvar_ones = K.ones_like(kvar)
        >>> K.eval(kvar_ones)
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
"""
return tf.ones_like(x, dtype=dtype, name=name)

```

Project: DeepLearning    Author: Wanwannodao
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data  = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
        data_len  = tf.size(raw_data)
        batch_len = data_len // batch_size
        data      = tf.reshape(raw_data[0 : batch_size * batch_len],
                               [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        epoch_size = tf.identity(epoch_size, name="epoch_size")

        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()

        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps],
                             #tf.ones_like([0, i * num_steps]))
                             [1,1])
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1],
                             #tf.ones_like([0, i * num_steps]))
                             [1,1])
        y.set_shape([batch_size, num_steps])
        return x, y
Project: JetsonTX1_im2txt    Author: Netzeband
def build_inputs(self):
    if self.mode == "inference":
      # Inference mode doesn't read from disk, so defer to parent.
      return super(ShowAndTellModel, self).build_inputs()
    else:
      # Replace disk I/O with random Tensors.
      self.images = tf.random_uniform(
          shape=[self.config.batch_size, self.config.image_height,
                 self.config.image_width, 3],
          minval=-1,
          maxval=1)
      self.input_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.target_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.input_mask = tf.ones_like(self.input_seqs)
Project: tf_practice    Author: juho-lee
def spatial_transformer(U, theta, out_height, out_width):
    num_batch = tf.shape(U)[0]
    height, width, num_channels = U.get_shape()[1:]

    x_t, y_t = meshgrid(out_height, out_width)
    x_t = tf.expand_dims(x_t, 0)
    y_t = tf.expand_dims(y_t, 0)
    if theta.get_shape()[1] == 3:
        s, t_x, t_y = tf.split(1, 3, theta)
        x_s = tf.reshape(s*tf.tile(x_t, [num_batch,1]) + t_x, [-1])
        y_s = tf.reshape(s*tf.tile(y_t, [num_batch,1]) + t_y, [-1])
    else:
        grid = tf.expand_dims(tf.concat(0, [x_t, y_t, tf.ones_like(x_t)]), 0)
        grid = tf.tile(grid, [num_batch,1,1])
        grid_t = tf.batch_matmul(tf.reshape(theta, [-1,2,3]), grid)
        x_s = tf.reshape(tf.slice(grid_t, [0,0,0], [-1,1,-1]), [-1])
        y_s = tf.reshape(tf.slice(grid_t, [0,1,0], [-1,1,-1]), [-1])

    return transform(U, x_s, y_s, num_batch, out_height, out_width, num_channels)

# last layer of localization net
Project: tf_practice    Author: juho-lee
def spatial_transformer(U, theta, out_height, out_width):
    num_batch = tf.shape(U)[0]
    height, width, num_channels = U.get_shape()[1:]

    x_t, y_t = meshgrid(out_height, out_width)
    x_t = tf.expand_dims(x_t, 0)
    y_t = tf.expand_dims(y_t, 0)
    if theta.get_shape()[1] == 3:
        s, t_x, t_y = tf.split(1, 3, theta)
        x_s = tf.reshape(s*tf.tile(x_t, [num_batch,1]) + t_x, [-1])
        y_s = tf.reshape(s*tf.tile(y_t, [num_batch,1]) + t_y, [-1])
    else:
        grid = tf.expand_dims(tf.concat(0, [x_t, y_t, tf.ones_like(x_t)]), 0)
        grid = tf.tile(grid, [num_batch,1,1])
        grid_t = tf.batch_matmul(tf.reshape(theta, [-1,2,3]), grid)
        x_s = tf.reshape(tf.slice(grid_t, [0,0,0], [-1,1,-1]), [-1])
        y_s = tf.reshape(tf.slice(grid_t, [0,1,0], [-1,1,-1]), [-1])

    return transform(U, x_s, y_s, num_batch, out_height, out_width, num_channels)

# last layer of localization net
Project: rltools    Author: sisl
def _make_actiondist_ops(self, obs_B_Df):
        with tf.variable_scope('flat'):
            flat = nn.FlattenLayer(obs_B_Df)
        with tf.variable_scope('hidden'):
            net = nn.FeedforwardNet(flat.output, flat.output_shape, self.hidden_spec)
        with tf.variable_scope('out'):
            mean_layer = nn.AffineLayer(net.output, net.output_shape, self.action_space.shape,
                                        Winitializer=tf.zeros_initializer, binitializer=None)

        means_B_Da = mean_layer.output

        # logstdev params
        logstdevs_1_Da = tf.get_variable('logstdevs_1_Da', shape=(1, self.action_space.shape[0]),
                                         initializer=tf.constant_initializer(self.init_logstdev))
        stdevs_1_Da = self.min_stdev + tf.exp(
            logstdevs_1_Da)  # Required for stability of kl computations
        stdevs_B_Da = tf.ones_like(means_B_Da) * stdevs_1_Da

        actiondist_B_Pa = tf.concat(1, [means_B_Da, stdevs_B_Da])
        return actiondist_B_Pa
Project: tefla    Author: openAGI
def test_discriminator_loss_with_placeholder_for_logits(self):
        logits = tf.placeholder(tf.float32, shape=(None, 4))
        logits2 = tf.placeholder(tf.float32, shape=(None, 4))
        real_weights = tf.ones_like(logits, dtype=tf.float32)
        generated_weights = tf.ones_like(logits, dtype=tf.float32)

        loss = self._d_loss_fn(
            logits, logits2, real_weights=real_weights,
            generated_weights=generated_weights)

        with self.test_session() as sess:
            loss = sess.run(loss,
                            feed_dict={
                                logits: [self._discriminator_real_outputs_np],
                                logits2: [self._discriminator_gen_outputs_np],
                            })
            self.assertAlmostEqual(self._expected_d_loss, loss, 5)
Project: tefla    Author: openAGI
def sigmoid_kl_with_logits(logits, targets):
        """ Sigmoid cross entropy with smooth labels
        Args:
            logits: logits
            targets: smooth targets

        Returns:
            cross entropy loss
        """

        assert isinstance(targets, float)
        if targets in [0., 1.]:
            entropy = 0.
        else:
            entropy = - targets * np.log(targets) - \
                (1. - targets) * np.log(1. - targets)
        return tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits) * targets, logits=logits) - entropy
Project: tefla    Author: openAGI
def _sigmoid_kl_with_logits(self, logits, targets):
        """ Sigmoid cross entropy with smooth labels

        Args:
            logits: logits
            targets: smooth targets

        Returns:
            cross entropy loss
        """

        assert isinstance(targets, float)
        if targets in [0., 1.]:
            entropy = 0.
        else:
            entropy = - targets * \
                np.log(targets) - (1. - targets) * np.log(1. - targets)
        return tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.ones_like(logits) * targets) - entropy
Project: Neural-EM    Author: sjoerdvansteenkiste
def init_state(self, batch_size, K, dtype):
        # inner RNN hidden state init
        with tf.name_scope('inner_RNN_init'):
            h = self.cell.zero_state(batch_size * K, dtype)

        # initial prediction (B, K, W, H, C)
        with tf.name_scope('pred_init'):
            pred_shape = tf.stack([batch_size, K] + self.input_shape.as_list())
            pred = tf.ones(shape=pred_shape, dtype=dtype) * self.pred_init

        # initial gamma (B, K, W, H, 1)
        with tf.name_scope('gamma_init'):
            gamma_shape = self.gamma_shape.as_list()
            shape = tf.stack([batch_size, K] + gamma_shape)

            # init with Gaussian distribution
            gamma = tf.abs(tf.random_normal(shape, dtype=dtype))
            gamma /= tf.reduce_sum(gamma, 1, keep_dims=True)

            # init with all 1 if K = 1
            if K == 1:
                gamma = tf.ones_like(gamma)

            return h, pred, gamma
Project: lsdc    Author: febert
def remove(self, ids):
    """Remove the ids (and their associated scores) from the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(
          self.id_to_score,
          ids,
          tf.ones_like(
              ids, dtype=tf.float32) * tf.float32.min)
      # We assume that removed ids are almost always in the shortlist,
      # so it makes no sense to hide the Op behind a tf.cond
      shortlist_ids_to_remove, new_length = self.ops.top_n_remove(self.sl_ids,
                                                                  ids)
      u1 = tf.scatter_update(
          self.sl_ids, tf.concat(0, [[0], shortlist_ids_to_remove]),
          tf.concat(0, [new_length,
                        tf.ones_like(shortlist_ids_to_remove) * -1]))
      u2 = tf.scatter_update(
          self.sl_scores,
          shortlist_ids_to_remove,
          tf.float32.min * tf.ones_like(
              shortlist_ids_to_remove, dtype=tf.float32))
      self.last_ops = [scatter_op, u1, u2]
Project: lsdc    Author: febert
def remove(self, ids):
    """Remove the ids (and their associated scores) from the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(
          self.id_to_score,
          ids,
          tf.ones_like(
              ids, dtype=tf.float32) * tf.float32.min)
      # We assume that removed ids are almost always in the shortlist,
      # so it makes no sense to hide the Op behind a tf.cond
      shortlist_ids_to_remove, new_length = self.ops.top_n_remove(self.sl_ids,
                                                                  ids)
      u1 = tf.scatter_update(
          self.sl_ids, tf.concat(0, [[0], shortlist_ids_to_remove]),
          tf.concat(0, [new_length,
                        tf.ones_like(shortlist_ids_to_remove) * -1]))
      u2 = tf.scatter_update(
          self.sl_scores,
          shortlist_ids_to_remove,
          tf.float32.min * tf.ones_like(
              shortlist_ids_to_remove, dtype=tf.float32))
      self.last_ops = [scatter_op, u1, u2]
Project: keras-customized    Author: ambrite
def ones_like(x, name=None):
    '''Instantiates an all-ones Keras variable
    of the same shape as another Keras variable or tensor and returns it.

    # Arguments
        x: Keras variable or tensor.

    # Returns
        A Keras variable, filled with `1.0`.

    # Example
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.random.random((2,3)))
        >>> kvar_ones = K.ones_like(kvar)
        >>> K.eval(kvar_ones)
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
    ```
    '''
    return tf.ones_like(x, name=name)

Project: im2txt_api    Author: mainyaa
def build_inputs(self):
    if self.mode == "inference":
      # Inference mode doesn't read from disk, so defer to parent.
      return super(ShowAndTellModel, self).build_inputs()
    else:
      # Replace disk I/O with random Tensors.
      self.images = tf.random_uniform(
          shape=[self.config.batch_size, self.config.image_height,
                 self.config.image_width, 3],
          minval=-1,
          maxval=1)
      self.input_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.target_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.input_mask = tf.ones_like(self.input_seqs)
Project: auDeep    Author: auDeep
def loss(self) -> tf.Tensor:
        """
        Computes the reconstruction loss of the autoencoder.

        The reconstruction loss is computed as the root mean square error between the target sequence and the 
        reconstructed sequence.

        Returns
        -------
        tf.Tensor
            Scalar tensor containing the reconstruction loss averaged over the entire input batch
        """
        reconstruction = self.reconstruction

        if self.mask_silence:
            reconstruction = tf.where(self.targets == -1., -tf.ones_like(reconstruction), reconstruction)

        loss = tf.sqrt(tf.reduce_mean(tf.square(self.targets - reconstruction)))
        summaries.scalar_summaries(loss)

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        return loss
Project: auDeep    Author: auDeep
def loss(self) -> tf.Tensor:
        """
        Computes the reconstruction loss of the autoencoder.

        The reconstruction loss is computed as the root mean square error between the target sequence and the 
        reconstructed sequence.

        Returns
        -------
        tf.Tensor
            Scalar tensor containing the reconstruction loss averaged over the entire input batch
        """
        reconstruction = self.reconstruction

        if self.mask_silence:
            reconstruction = tf.where(self.targets == -1., -tf.ones_like(reconstruction), reconstruction)

        loss = tf.sqrt(tf.reduce_mean(tf.square(self.targets - reconstruction)))
        summaries.scalar_summaries(loss)

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        return loss
Project: auDeep    Author: auDeep
def loss(self) -> tf.Tensor:
        """
        Computes the reconstruction loss of the autoencoder.

        The reconstruction loss is computed as the root mean square error between the target sequence and the 
        reconstructed sequence.

        Returns
        -------
        tf.Tensor
            Scalar tensor containing the reconstruction loss averaged over the entire input batch
        """
        reconstruction = self.reconstruction

        if self.mask_silence:
            reconstruction = tf.where(self.targets == -1., -tf.ones_like(reconstruction), reconstruction)

        loss = tf.sqrt(tf.reduce_mean(tf.square(self.targets - reconstruction)))
        summaries.scalar_summaries(loss)

        tf.add_to_collection(tf.GraphKeys.LOSSES, loss)

        return loss
Project: VAE-GAN    Author: sergeybok
def build_vae(self,encoder_shapes,encoder_filters,optimizer=tf.train.AdamOptimizer,conv=True):
        self.encoder_shapes = encoder_shapes
        self.encoder_filters = encoder_filters
        self.encoder_X = tf.placeholder(tf.float32,shape=[None,28,28,1], name='encoder_X')

        self.mu, self.sigma, self.encoder_params = self.build_encoder(self.encoder_X)

        Qz = tf.contrib.distributions.Normal(mu=self.mu, sigma=self.sigma)
        z_sample = Qz.sample()

        self.decoded = self.build_generator(z_sample,self.phase,weights=self.gen_params)

        self.klloss = -(1)*tf.reduce_sum(1 + tf.log(self.sigma**2) - self.mu**2 - self.sigma**2, 1)
        #sigmaloss = tf.reduce_sum((tf.ones_like(z_sigma)-z_sigma)**4 )

        offset = 1e-7
        obs = tf.clip_by_value(self.decoded, offset, 1 - offset)
        self.logloss = -1*(tf.reduce_sum(self.encoder_X*tf.log(obs) + (1-self.encoder_X)*tf.log(1-obs)))


        self.vae_cost = tf.reduce_mean(self.logloss + self.klloss)

        self.vae_optimizer = optimizer(self.LR)
        self.train_step_e = self.vae_optimizer.minimize(self.vae_cost,var_list=self.encoder_params)
Project: Sing_Par    Author: wanghm92
def _sparse_moving_average(self, x_tm1, idxs, a_t_, name, beta=.9):
    """"""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    b_tm1_ = tf.gather(b_tm1, idxs)
    shape = self.get_variable_shape(x_tm1)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[shape[0]]+[1]*(len(shape)-1))
    tm1_ = tf.gather(tm1, idxs)
    t = tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))
    t_ = tf.gather(t, idxs)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t_ = beta_t * (1-beta_t**tm1_) / (1-beta_t**t_)
    else:
      beta_t_ = tm1_/t_
    b_t = tf.scatter_update(b_tm1, idxs, beta_t_*b_tm1_)
    b_t = tf.scatter_add(b_t, idxs, (1-beta_t_)*a_t_)
    return b_t, t

  #=============================================================
Project: semsearch    Author: sanjana-Bijoe
def build_inputs(self):
    if self.mode == "inference":
      # Inference mode doesn't read from disk, so defer to parent.
      return super(ShowAndTellModel, self).build_inputs()
    else:
      # Replace disk I/O with random Tensors.
      self.images = tf.random_uniform(
          shape=[self.config.batch_size, self.config.image_height,
                 self.config.image_width, 3],
          minval=-1,
          maxval=1)
      self.input_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.target_seqs = tf.random_uniform(
          [self.config.batch_size, 15],
          minval=0,
          maxval=self.config.vocab_size,
          dtype=tf.int64)
      self.input_mask = tf.ones_like(self.input_seqs)
Project: openai-rl    Author: morgangiraud
def eligibility_traces(Qs_t, states_t, actions_t, discount, lambda_value):
    et = tf.get_variable(
        "eligibilitytraces"
        , shape=Qs_t.get_shape()
        , dtype=tf.float32
        , trainable=False
        , initializer=tf.zeros_initializer()
    )
    tf.summary.histogram('eligibilitytraces', et)
    dec_et_op = tf.assign(et, discount * lambda_value * et)
    with tf.control_dependencies([dec_et_op]):
        state_action_pairs = tf.stack([states_t, actions_t], 1)
        update_et_op = tf.scatter_nd_update(et, indices=state_action_pairs, updates=tf.ones_like(states_t, dtype=tf.float32))

    reset_et_op = et.assign(tf.zeros_like(et, dtype=tf.float32))

    return (et, update_et_op, reset_et_op)
Project: Parser-v1    Author: tdozat
def _sparse_moving_average(self, x_tm1, idxs, a_t_, name, beta=.9):
    """"""

    b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
    b_tm1_ = tf.gather(b_tm1, idxs)
    shape = self.get_variable_shape(x_tm1)
    tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[shape[0]]+[1]*(len(shape)-1))
    tm1_ = tf.gather(tm1, idxs)
    t = tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))
    t_ = tf.gather(t, idxs)
    if beta < 1:
      beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
      beta_t_ = beta_t * (1-beta_t**tm1_) / (1-beta_t**t_)
    else:
      beta_t_ = tm1_/t_
    b_t = tf.scatter_update(b_tm1, idxs, beta_t_*b_tm1_)
    b_t = tf.scatter_add(b_t, idxs, (1-beta_t_)*a_t_)
    return b_t, t

  #=============================================================
Project: GalaxyGAN_python    Author: Ireneruru
def __init__(self):
        self.image = tf.placeholder(tf.float32, shape=(1,conf.train_size, conf.train_size, conf.img_channel))
        self.cond = tf.placeholder(tf.float32, shape=(1,conf.train_size, conf.train_size, conf.img_channel))

        self.gen_img = self.generator(self.cond)

        pos = self.discriminator(self.image, self.cond, False)
        neg = self.discriminator(self.gen_img, self.cond, True)
        pos_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pos, labels=tf.ones_like(pos)))
        neg_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.zeros_like(neg)))

        self.delta = tf.square(tf.reduce_mean(self.image)-(tf.reduce_mean(self.gen_img)))

        self.d_loss = pos_loss + neg_loss

        #with regularization
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.ones_like(neg))) + \
                  conf.L1_lambda * tf.reduce_mean(tf.abs(self.image - self.gen_img)) + conf.sum_lambda *self.delta

        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'disc' in var.name]
        self.g_vars = [var for var in t_vars if 'gen' in var.name]
Project: odin    Author: imito
def renorm_rms(X, axis=1, target_rms=1.0, name="RescaleRMS"):
  """ Scales the data such that RMS of the features dimension is 1.0
  scale = sqrt(x^t x / (D * target_rms^2)).

  NOTE
  ----
  by defaults, assume the features dimension is `1`
  """
  with tf.variable_scope(name):
    D = tf.sqrt(tf.cast(tf.shape(X)[axis], X.dtype.base_dtype))
    l2norm = tf.sqrt(tf.reduce_sum(X ** 2, axis=axis, keep_dims=True))
    X_rms = l2norm / D
    X_rms = tf.where(tf.equal(X_rms, 0.),
                     x=tf.ones_like(X_rms, dtype=X_rms.dtype.base_dtype),
                     y=X_rms)
    return target_rms * X / X_rms


# ===========================================================================
# RNN and loop
# ===========================================================================
Project: hand3d    Author: lmb-freiburg
def _atan2(y, x):
    """ My implementation of atan2 in tensorflow.  Returns in -pi .. pi."""
    tan = tf.atan(y / (x + 1e-8))  # this returns in -pi/2 .. pi/2

    one_map = tf.ones_like(tan)

    # correct quadrant error
    correction = tf.where(tf.less(x + 1e-8, 0.0), 3.141592653589793*one_map, 0.0*one_map)
    tan_c = tan + correction  # this returns in -pi/2 .. 3pi/2

    # bring to positive values
    correction = tf.where(tf.less(tan_c, 0.0), 2*3.141592653589793*one_map, 0.0*one_map)
    tan_zero_2pi = tan_c + correction  # this returns in 0 .. 2pi

    # make symmetric
    correction = tf.where(tf.greater(tan_zero_2pi, 3.141592653589793), -2*3.141592653589793*one_map, 0.0*one_map)
    tan_final = tan_zero_2pi + correction  # this returns in -pi .. pi
    return tan_final
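
A quick, hand-written sanity check of _atan2 against NumPy's arctan2 (illustrative only; graph mode assumed):

import numpy as np
import tensorflow as tf

y = tf.constant([1.0, 1.0, -1.0, -1.0])
x = tf.constant([1.0, -1.0, -1.0, 1.0])
with tf.Session() as sess:
    print(sess.run(_atan2(y, x)))                                       # ~[ 0.785  2.356 -2.356 -0.785]
print(np.arctan2([1.0, 1.0, -1.0, -1.0], [1.0, -1.0, -1.0, 1.0]))       # same quadrant-correct angles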
Project: dreamscape    Author: themattinthehatt
def _define_loss(self):
        """Define loss function that will be used to optimize model params"""

        # define generator loss
        with tf.variable_scope('generator'):
            self.loss_gen = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_gen,
                    labels=tf.ones_like(self.disc_gen)))

        # define discriminator loss
        with tf.variable_scope('discriminator'):
            self.loss_disc = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_real,
                    labels=tf.ones_like(self.disc_real)) +
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=self.disc_gen,
                    labels=tf.zeros_like(self.disc_gen)))

        # save summaries of losses
        tf.summary.scalar('loss_gen', self.loss_gen)
        tf.summary.scalar('loss_disc', self.loss_disc)
Project: GAN    Author: kunrenzhilu
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
        discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                           name="disc_real_loss")

        discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                           name="disc_fake_loss")
        self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

        gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
        if use_features:
            gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
        else:
            gen_loss_features = 0
        self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

        tf.summary.scalar("Discriminator_loss", self.discriminator_loss)
        tf.summary.scalar("Generator_loss", self.gen_loss)
Project: TextGAN    Author: ankitkv
def unwrap_output_sparse(self, final_state, include_stop_tokens=True):
        """
        Retrieve the beam search output from the final state.

        Returns a sparse tensor with underlying dimensions of [batch_size, max_len]
        """
        output_dense = final_state[0]
        mask = tf.not_equal(output_dense, self.stop_token)

        if include_stop_tokens:
            output_dense = tf.concat(1, [output_dense[:, 1:],
                                         tf.ones_like(output_dense[:, 0:1]) *
                                         self.stop_token])
            mask = tf.concat(1, [mask[:, 1:], tf.cast(tf.ones_like(mask[:, 0:1],
                                                                   dtype=tf.int8),
                                                      tf.bool)])

        return sparse_boolean_mask(output_dense, mask)
Project: youtube-8m    Author: wangheda
def get_weights_by_predictions(labels_batch, predictions):
  epsilon = 1e-6
  float_labels = tf.cast(labels_batch, dtype=tf.float32)
  cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
      1 - float_labels) * tf.log(1 - predictions + epsilon)
  ce = tf.reduce_sum(tf.negative(cross_entropy_loss), axis=1)
  mean_ce = tf.reduce_mean(ce + epsilon)
  weights = tf.where(ce > mean_ce, 
                     3.0 * tf.ones_like(ce),
                     0.5 * tf.ones_like(ce))
  return weights