Python tensorflow module: one_hot() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.one_hot().
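As a quick reference before the project examples, here is a minimal sketch of the basic call (TensorFlow 1.x style, matching the snippets below); the indices and depth are illustrative values:

import tensorflow as tf

indices = tf.constant([0, 2, 1])         # integer class ids
encoded = tf.one_hot(indices, depth=3)   # defaults: on_value=1.0, off_value=0.0

with tf.Session() as sess:
    print(sess.run(encoded))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]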

Project: human-rl    Author: gsastry    | project source | file source
def model(self, features, labels):
        x = features["observation"]
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        x = tf.contrib.layers.convolution2d(x, 2, kernel_size=[3, 3], stride=[2, 2], activation_fn=tf.nn.elu)
        actions = tf.one_hot(tf.reshape(features["action"],[-1]), depth=6, on_value=1.0, off_value=0.0, axis=1)
        x = tf.concat([tf.contrib.layers.flatten(x), actions], axis=1)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        x = tf.contrib.layers.fully_connected(x, 100, activation_fn=tf.nn.elu)
        logits = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
        prediction = tf.sigmoid(logits, name="prediction")
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.expand_dims(labels, axis=1)), name="loss")
        train_op = tf.contrib.layers.optimize_loss(
          loss, tf.contrib.framework.get_global_step(), optimizer='Adam',
          learning_rate=self.learning_rate)
        tf.add_to_collection('prediction', prediction)
        tf.add_to_collection('loss', loss)
        return prediction, loss, train_op
Project: ICGan-tensorflow    Author: zhangqianhui    | project source | file source
def build_model4(self):

        self.weights3, self.biases3 = self.get_en_z_variables()
        self.weights4, self.biases4 = self.get_en_y_variables()

        self.e_z = self.encode_z(self.images, weights=self.weights3, biases=self.biases3)
        self.e_y = self.encode_y(self.images, weights=self.weights4, biases=self.biases4)

        # Shift the predicted label by extend_value (+1, +2, or +3) and re-encode as one-hot
        self.e_y = tf.one_hot(tf.argmax(self.e_y, 1) + self.extend_value, 10)

        self.fake_images = self.generate(self.e_z, self.e_y, weights=self.weights1, biases=self.biases1)

        t_vars = tf.trainable_variables()

        self.g_vars = [var for var in t_vars if 'gen' in var.name]
        self.enz_vars = [var for var in t_vars if 'enz' in var.name]
        self.eny_vars = [var for var in t_vars if 'eny' in var.name]

        self.saver = tf.train.Saver(self.g_vars)

        self.saver_z = tf.train.Saver(self.g_vars + self.enz_vars)
        self.saver_y = tf.train.Saver(self.eny_vars)

    #do train
Project: ICGan-tensorflow    Author: zhangqianhui    | project source | file source
def encode_z(self, x, weights, biases):

        c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='enz_bn1'))

        c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='enz_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128*7*7])

        #using tanh instead of tf.nn.relu.
        result_z = batch_normal(fully_connect(c2, weights['e3'], biases['eb3']), scope='enz_bn3')

        #result_c = tf.nn.sigmoid(fully_connect(c2, weights['e4'], biases['eb4']))

        #Transforming one-hot form
        #sparse_label = tf.arg_max(result_c, 1)

        #y_vec = tf.one_hot(sparse_label, 10)

        return result_z
Project: deep-learning    Author: ljanyst    | project source | file source
def get_training_tensors(self, learning_rate = 0.001, grad_clip = 5):
        #-----------------------------------------------------------------------
        # Build a loss function
        #-----------------------------------------------------------------------
        with tf.name_scope('targets-encode'):
            y_one_hot  = tf.one_hot(self.targets, self.n_classes)
            y_reshaped = tf.reshape(y_one_hot, self.logits.get_shape())

        with tf.name_scope('loss'):
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                           labels=y_reshaped)
            loss = tf.reduce_mean(loss)
            tf.summary.scalar('loss', loss)

        #-----------------------------------------------------------------------
        # Build the optimizer
        #-----------------------------------------------------------------------
        with tf.name_scope('optimizer'):
            tvars     = tf.trainable_variables()
            grads, _  = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                               grad_clip)
            train_op  = tf.train.AdamOptimizer(learning_rate)
            optimizer = train_op.apply_gradients(zip(grads, tvars))

        return loss, optimizer
Project: text_classification    Author: brightmart    | project source | file source
def smoothing_cross_entropy(self, logits, labels, vocab_size, confidence=0.9):  # confidence = 1.0 - label_smoothing, where label_smoothing = 0.1; adapted from http://github.com/tensorflow/tensor2tensor
        """Cross entropy with label smoothing to limit over-confidence."""
        with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
            # Low confidence is given to all non-true labels, uniformly.
            low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
            # Normalizing constant is the best cross-entropy value with soft targets.
            # We subtract it just for readability, makes no difference on learning.
            normalizing = -(confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20))
            # Soft targets.
            soft_targets = tf.one_hot(
                tf.cast(labels, tf.int32),
                depth=vocab_size,
                on_value=confidence,
                off_value=low_confidence)
            xentropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=soft_targets)
        return xentropy - normalizing
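To see what the soft targets above look like, here is a small illustrative sketch (vocab_size=4 and confidence=0.9 are assumed values chosen for the demo, not taken from the project):

import tensorflow as tf

confidence = 0.9
vocab_size = 4
low_confidence = (1.0 - confidence) / (vocab_size - 1)   # ~0.0333 per non-true label

soft_targets = tf.one_hot(tf.constant([1]), depth=vocab_size,
                          on_value=confidence, off_value=low_confidence)
with tf.Session() as sess:
    print(sess.run(soft_targets))   # [[0.0333 0.9    0.0333 0.0333]]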
Project: tf-image-interpreter    Author: ThoughtWorksInc    | project source | file source
def _generate_labels(self, overlaps):
    labels = tf.Variable(tf.ones(shape=(tf.shape(overlaps)[0],), dtype=tf.float32) * -1, trainable=False,
                         validate_shape=False)
    gt_max_overlaps = tf.argmax(overlaps, axis=0)
    anchor_max_overlaps = tf.argmax(overlaps, axis=1)
    mask = tf.one_hot(anchor_max_overlaps, tf.shape(overlaps)[1], on_value=True, off_value=False)
    max_overlaps = tf.boolean_mask(overlaps, mask)
    if self._debug:
      max_overlaps = tf.Print(max_overlaps, [max_overlaps])
    labels = tf.scatter_update(labels, gt_max_overlaps, tf.ones((tf.shape(gt_max_overlaps)[0],)))
    # TODO: extract config object
    over_threshold_mask = tf.reshape(tf.where(max_overlaps > 0.5), (-1,))
    if self._debug:
      over_threshold_mask = tf.Print(over_threshold_mask, [over_threshold_mask], message='over threshold index : ')
    labels = tf.scatter_update(labels, over_threshold_mask, tf.ones((tf.shape(over_threshold_mask)[0],)))
    # TODO: support clobber positive in the origin implement
    below_threshold_mask = tf.reshape(tf.where(max_overlaps < 0.3), (-1,))
    if self._debug:
      below_threshold_mask = tf.Print(below_threshold_mask, [below_threshold_mask], message='below threshold index : ')
    labels = tf.scatter_update(labels, below_threshold_mask, tf.zeros((tf.shape(below_threshold_mask)[0],)))
    return labels
Project: deeplab_v1_tf1.0    Author: automan000    | project source | file source
def prepare_label(self, input_batch, new_size):
        """Resize masks and perform one-hot encoding.

        Args:
          input_batch: input tensor of shape [batch_size, H, W, 1].
          new_size: a tensor with new height and width.

        Returns:
          Outputs a tensor of shape [batch_size, h, w, 21]
          with the last dimension comprised of 0's and 1's only.
        """
        with tf.name_scope('label_encode'):
            input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # As labels are integer numbers, need to use NN interp.
            input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.
            input_batch = tf.one_hot(input_batch, depth=21)
        return input_batch
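A hedged sketch of the shape flow through prepare_label, with a dummy batch and sizes chosen purely for illustration:

import tensorflow as tf

masks = tf.zeros([2, 128, 128, 1], dtype=tf.int32)            # [batch, H, W, 1]
resized = tf.image.resize_nearest_neighbor(masks, [64, 64])   # [2, 64, 64, 1]
squeezed = tf.squeeze(resized, axis=[3])                      # [2, 64, 64]
encoded = tf.one_hot(squeezed, depth=21)                      # [2, 64, 64, 21]
print(encoded.get_shape())                                    # (2, 64, 64, 21)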
Project: TFExperiments    Author: gnperdue    | project source | file source
def parse_mnist_tfrec(tfrecord, features_shape):
    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'features': tf.FixedLenFeature([], tf.string),
            'targets': tf.FixedLenFeature([], tf.string)
        }
    )
    features = tf.decode_raw(tfrecord_features['features'], tf.uint8)
    features = tf.reshape(features, features_shape)
    features = tf.cast(features, tf.float32)
    targets = tf.decode_raw(tfrecord_features['targets'], tf.uint8)
    targets = tf.reshape(targets, [])
    targets = tf.one_hot(indices=targets, depth=10, on_value=1, off_value=0)
    targets = tf.cast(targets, tf.float32)
    return features, targets
Project: TFExperiments    Author: gnperdue    | project source | file source
def parse_mnist_tfrec(tfrecord, name, features_shape, scalar_targs=False):
    tfrecord_features = tf.parse_single_example(
        tfrecord,
        features={
            'features': tf.FixedLenFeature([], tf.string),
            'targets': tf.FixedLenFeature([], tf.string)
        },
        name=name+'_data'
    )
    with tf.variable_scope('features'):
        features = tf.decode_raw(
            tfrecord_features['features'], tf.uint8
        )
        features = tf.reshape(features, features_shape)
        features = tf.cast(features, tf.float32)
    with tf.variable_scope('targets'):
        targets = tf.decode_raw(tfrecord_features['targets'], tf.uint8)
        if scalar_targs:
            targets = tf.reshape(targets, [])
        targets = tf.one_hot(
            indices=targets, depth=10, on_value=1, off_value=0
        )
        targets = tf.cast(targets, tf.float32)
    return features, targets
Project: HyperGAN    Author: 255BITS    | project source | file source
def get_lookup_table(self):
        if self.lookup is None:
            vocabulary = self.get_vocabulary()
            values = np.arange(len(vocabulary))
            lookup = {}

            if self.one_hot:
                for i, key in enumerate(vocabulary):
                    lookup[key]=self.np_one_hot(values[i], len(values))
            else:
                for i, key in enumerate(vocabulary):
                    lookup[key]=values[i]

            # reverse the mapping so encoded values map back to vocabulary keys
            lookup = {i[1]: i[0] for i in lookup.items()}
            self.lookup = lookup
        return self.lookup
Project: HyperGAN    Author: 255BITS    | project source | file source
def sample_output(self, val):
        vocabulary = self.get_vocabulary()
        if self.one_hot:
            vals = [ np.argmax(r) for r in val ]
            ox_val = [vocabulary[obj] for obj in list(vals)]
            string = "".join(ox_val)
            return string
        else:
            val = np.reshape(val, [-1])
            val *= len(vocabulary)/2.0
            val += len(vocabulary)/2.0
            val = np.round(val)

            val = np.maximum(0, val)
            val = np.minimum(len(vocabulary)-1, val)

            ox_val = [self.get_character(obj) for obj in list(val)]
            string = "".join(ox_val)
            return string
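The else-branch above decodes continuous outputs; for the one-hot branch the decoding is a plain argmax per row. A tiny NumPy sketch with an assumed two-character vocabulary:

import numpy as np

vocabulary = ['a', 'b']
val = np.array([[0.9, 0.1],    # two rows of (approximately) one-hot output
                [0.2, 0.8]])
print("".join(vocabulary[np.argmax(r)] for r in val))   # "ab"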
Project: zhusuan    Author: thu-ml    | project source | file source
def _sample(self, n_samples):
        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(
            tf.multinomial(logits_flat, n_samples * self.n_experiments))
        shape = tf.concat([[n_samples, self.n_experiments],
                           self.batch_shape], 0)
        samples = tf.reshape(samples_flat, shape)
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        static_n_exps = self.n_experiments if isinstance(self.n_experiments,
                                                         int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples, static_n_exps]).
            concatenate(self.get_batch_shape()))
        samples = tf.reduce_sum(
            tf.one_hot(samples, self.n_categories, dtype=self.dtype), axis=1)
        return samples
Project: zhusuan    Author: thu-ml    | project source | file source
def _sample(self, n_samples):
        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(tf.multinomial(logits_flat, n_samples))
        if self.logits.get_shape().ndims == 2:
            samples = samples_flat
        else:
            shape = tf.concat([[n_samples], self.batch_shape], 0)
            samples = tf.reshape(samples_flat, shape)
            static_n_samples = n_samples if isinstance(n_samples,
                                                       int) else None
            samples.set_shape(
                tf.TensorShape([static_n_samples]).
                concatenate(self.get_batch_shape()))
        samples = tf.one_hot(samples, self.n_categories, dtype=self.dtype)
        return samples
Project: kaggle_redefining_cancer_treatment    Author: jorgemf    | project source | file source
def doc2vec_prediction_model(input_vectors, input_gene, input_variation, output_label, batch_size,
                             is_training, embedding_size, output_classes):
    # inputs/outputs
    input_vectors = tf.reshape(input_vectors, [batch_size, embedding_size])
    input_gene = tf.reshape(input_gene, [batch_size, embedding_size])
    input_variation = tf.reshape(input_variation, [batch_size, embedding_size])
    targets = None
    if output_label is not None:
        output_label = tf.reshape(output_label, [batch_size, 1])
        targets = tf.one_hot(output_label, axis=-1, depth=output_classes, on_value=1.0,
                             off_value=0.0)
        targets = tf.squeeze(targets, axis=1)

    net = tf.concat([input_vectors, input_gene, input_variation], axis=1)
    net = layers.fully_connected(net, embedding_size * 2, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size, activation_fn=tf.nn.relu)
    net = layers.dropout(net, keep_prob=0.85, is_training=is_training)
    net = layers.fully_connected(net, embedding_size // 4, activation_fn=tf.nn.relu)
    logits = layers.fully_connected(net, output_classes, activation_fn=None)

    return logits, targets
Project: PixelDCN    Author: HongyangGao    | project source | file source
def cal_loss(self):
        one_hot_labels = tf.one_hot(
            self.labels, depth=self.conf.class_num,
            axis=self.channel_axis, name='labels/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_labels, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_preds = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.labels, self.decoded_preds,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        # weights = tf.cast(
        #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
        #     tf.int32, name='m_iou/weights')
        weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
            tf.int64, name='m_iou/weights')
        labels = tf.multiply(self.labels, weights, name='m_iou/mul')
        self.m_iou, self.miou_op = tf.metrics.mean_iou(
            self.labels, self.decoded_preds, self.conf.class_num,
            weights, name='m_iou/m_ious')
Project: dwt    Author: min2209    | project source | file source
def depthCELoss2(pred, gt, weight, ss, outputChannels=16):
    with tf.name_scope("depth_CE_loss"):
        pred = tf.reshape(pred, (-1, outputChannels))
        epsilon = tf.constant(value=1e-25)
        predSoftmax = tf.to_float(tf.nn.softmax(pred))

        gt = tf.one_hot(indices=tf.to_int32(tf.squeeze(tf.reshape(gt, (-1, 1)))), depth=outputChannels, dtype=tf.float32)
        ss = tf.to_float(tf.reshape(ss, (-1, 1)))
        weight = tf.to_float(tf.reshape(weight, (-1, 1)))

        crossEntropyScaling = tf.to_float([3.0, 3.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])

        crossEntropy = -tf.reduce_sum(((1-gt)*tf.log(tf.maximum(1-predSoftmax, epsilon))
                                       + gt*tf.log(tf.maximum(predSoftmax, epsilon)))*ss*crossEntropyScaling*weight,
                                      reduction_indices=[1])

        crossEntropySum = tf.reduce_sum(crossEntropy, name="cross_entropy_sum")
        return crossEntropySum
Project: tensorflow-deeplab-lfov    Author: DrSleep    | project source | file source
def prepare_label(self, input_batch, new_size):
        """Resize masks and perform one-hot encoding.

        Args:
          input_batch: input tensor of shape [batch_size, H, W, 1].
          new_size: a tensor with new height and width.

        Returns:
          Outputs a tensor of shape [batch_size, h, w, 21]
          with the last dimension comprised of 0's and 1's only.
        """
        with tf.name_scope('label_encode'):
            input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # As labels are integer numbers, need to use NN interp.
            input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # Reducing the channel dimension.
            input_batch = tf.one_hot(input_batch, depth=21)
        return input_batch
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995    | project source | file source
def cal_loss(self):
        expand_annotations = tf.expand_dims(
            self.annotations, -1, name='annotations/expand_dims')
        one_hot_annotations = tf.squeeze(
            expand_annotations, axis=[self.channel_axis],
            name='annotations/squeeze')
        one_hot_annotations = tf.one_hot(
            one_hot_annotations, depth=self.conf.class_num,
            axis=self.channel_axis, name='annotations/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_annotations, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_predictions = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        self.dice_accuracy_op, self.sub_dice_list = ops.dice_accuracy(self.decoded_predictions,\
                                self.annotations,self.conf.class_num)
        correct_prediction = tf.equal(
            self.annotations, self.decoded_predictions,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
Project: opinatt    Author: epochx    | project source | file source
def _extract_argmax_and_one_hot(one_hot_size,
                                output_projection=None):
  """Get a loop_function that extracts the previous symbol and build a one-hot vector for it.

  Args:
    one_hot_size: total size of one-hot vector.
    output_projection: None or a pair (W, B). If provided, each fed previous
      output will first be multiplied by W and added B.

  Returns:
    A loop function.
  """
  def loop_function(prev, _):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])
    prev_symbol = math_ops.argmax(prev, 1)
    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = tf.one_hot(prev_symbol, one_hot_size)
    return emb_prev

  return loop_function
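What the returned loop function computes for one decoder step, inlined as a sketch (one_hot_size=5 and the fake logits are assumed for illustration):

import tensorflow as tf

prev_logits = tf.constant([[0.1, 2.0, 0.3, 0.0, 0.0]])   # fake previous-step output
prev_symbol = tf.argmax(prev_logits, 1)                  # -> [1]
emb_prev = tf.one_hot(prev_symbol, 5)                    # -> [[0. 1. 0. 0. 0.]]

with tf.Session() as sess:
    print(sess.run(emb_prev))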
Project: relaax    Author: deeplearninc    | project source | file source
def build_graph(self, actor, critic, cfg):
        self.ph_action = graph.Placeholder(np.int32, shape=(None,), name="ph_action")
        self.ph_advantage = graph.Placeholder(np.float32, shape=(None,), name="ph_adv")
        self.ph_discounted_reward = graph.Placeholder(np.float32, shape=(None,), name="ph_edr")

        action_one_hot = tf.one_hot(self.ph_action.node, actor.action_size)

        # avoid NaN
        log_pi = tf.log(tf.maximum(actor.node, 1e-20))

        # policy entropy
        self.entropy = -tf.reduce_sum(actor.node * log_pi)

        # policy loss
        self.policy_loss = -(tf.reduce_sum(tf.reduce_sum(log_pi * action_one_hot, axis=1) * self.ph_advantage.node)
                             + self.entropy * cfg.entropy_beta)

        # value loss
        self.value_loss = tf.reduce_sum(tf.square(self.ph_discounted_reward.node - critic.node))

        # gradient of policy and value are summed up
        # (Learning rate for the Critic is sized by critic_scale parameter)
        return self.policy_loss + cfg.critic_scale * self.value_loss
Project: relaax    Author: deeplearninc    | project source | file source
def build_graph(self, q_network, config):
        self.ph_reward = tf.placeholder(tf.float32, [None])
        self.ph_action = tf.placeholder(tf.int32, [None])
        self.ph_terminal = tf.placeholder(tf.int32, [None])
        self.ph_q_next_target = tf.placeholder(tf.float32, [None, config.output.action_size])
        self.ph_q_next = tf.placeholder(tf.float32, [None, config.output.action_size])

        action_one_hot = tf.one_hot(self.ph_action, config.output.action_size)
        q_action = tf.reduce_sum(tf.multiply(q_network.node, action_one_hot), axis=1)

        if config.double_dqn:
            q_max = tf.reduce_sum(self.ph_q_next_target * tf.one_hot(tf.argmax(self.ph_q_next, axis=1),
                                                                     config.output.action_size), axis=1)
        else:
            q_max = tf.reduce_max(self.ph_q_next_target, axis=1)

        y = self.ph_reward + tf.cast(1 - self.ph_terminal, tf.float32) * tf.scalar_mul(config.rewards_gamma, q_max)

        return tf.losses.absolute_difference(q_action, y)
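The one_hot/reduce_sum pair above is a common TF1 idiom for gathering one Q-value per row without tf.gather_nd; a minimal sketch with an assumed action size of 3:

import tensorflow as tf

q_values = tf.constant([[1.0, 5.0, 2.0],
                        [4.0, 0.0, 3.0]])
actions = tf.constant([1, 2])
mask = tf.one_hot(actions, 3)                         # [[0,1,0], [0,0,1]]
q_selected = tf.reduce_sum(q_values * mask, axis=1)

with tf.Session() as sess:
    print(sess.run(q_selected))                       # [5. 3.]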
Project: tensorflow-deep-qa    Author: shuishen112    | project source | file source
def char_rnn_model(features, target):
  """Character level recurrent neural network model to predict classes."""
  target = tf.one_hot(target, 15, 1, 0)
  byte_list = tf.one_hot(features, 256, 1, 0)
  byte_list = tf.unstack(byte_list, axis=1)

  cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
  _, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)

  logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
  loss = tf.contrib.losses.softmax_cross_entropy(logits, target)

  train_op = tf.contrib.layers.optimize_loss(
      loss,
      tf.contrib.framework.get_global_step(),
      optimizer='Adam',
      learning_rate=0.01)

  return ({
      'class': tf.argmax(logits, 1),
      'prob': tf.nn.softmax(logits)
  }, loss, train_op)
Project: YOLO2TensorFlow    Author: PaulChongPeng    | project source | file source
def inference_sequential(image_batch):
    network_fn = nets_factory.get_network_fn(
        name=FLAGS.model_name,
        num_classes=FLAGS.num_classes,
        is_training=True,
        weight_decay=FLAGS.weight_decay,
        num_anchors=5)
    net, end_points = network_fn(image_batch)

    box_coordinate, box_confidence, box_class_probs = yolo_v2.yolo_v2_head(net, FLAGS.num_classes,
                                                                           [[1, 2], [1, 3], [2, 1], [3, 1], [1, 1]],
                                                                           True)

    # preds = tf.reduce_max(box_class_probs, 4)
    # preds = tf.one_hot(tf.cast(preds, tf.int32), FLAGS.num_classes)

    # return preds

    return box_coordinate, box_confidence, box_class_probs


# =========================================================================== #
# Main training routine.
# =========================================================================== #
Project: ssd_tensorflow    Author: railsnoob    | project source | file source
def debug_train_setup(self):
        """ Use this SOLELY to figure out the size of the feature maps. """

        x = tf.placeholder(tf.float32,shape=(None,\
                                            self.cfg.g("image_height"),\
                                            self.cfg.g("image_width"),\
                                            self.cfg.g("n_channels")),\
                                            name='x')

        y = tf.placeholder(tf.int32,shape=(None,self.cfg.g("num_preds")),name='y')
        one_hot_y = tf.one_hot(y, 10)

        loc, conf = self._net.graph(x)

        # This is just a placeholder cost
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=conf, labels=one_hot_y))

        optimizer = tf.train.AdamOptimizer(learning_rate=self.cfg.g("adam_learning_rate")).minimize(cost)
Project: Unet_3D    Author: zhengyang-wang    | project source | file source
def cal_loss(self):
        one_hot_annotations = tf.one_hot(
            self.annotations, depth=self.conf.class_num,
            axis=self.channel_axis, name='annotations/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_annotations, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_predictions = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.annotations, self.decoded_predictions,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        self.softmax_predictions = tf.nn.softmax(self.predictions)
Project: tensorflow-deeplab-resnet    Author: DrSleep    | project source | file source
def prepare_label(input_batch, new_size, num_classes, one_hot=True):
    """Resize masks and perform one-hot encoding.

    Args:
      input_batch: input tensor of shape [batch_size, H, W, 1].
      new_size: a tensor with new height and width.
      num_classes: number of classes to predict (including background).
      one_hot: whether perform one-hot encoding.

    Returns:
      Outputs a tensor of shape [batch_size, h, w, num_classes]
      with the last dimension comprised of 0's and 1's only.
    """
    with tf.name_scope('label_encode'):
        input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
        input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
        if one_hot:
            input_batch = tf.one_hot(input_batch, depth=num_classes)
    return input_batch
Project: dataset    Author: analysiscenter    | project source | file source
def _build(self, inputs, *args, **kwargs):
        #images_shape = self.get_from_config('images_shape', (12, 12, 1))
        #num_classes = self.get_from_config('num_classes', 3)

        #x = tf.placeholder("float", [None] + list(images_shape), name='x')
        #y = tf.placeholder("int32",[None], name='y')
        #y_oe = tf.one_hot(y, num_classes, name='targets')

        c = conv2d_block(inputs['x'], 3, 3, conv=dict(kernel_initializer=tf.contrib.layers.xavier_initializer()), max_pooling=dict(strides=4))
        f = tf.reduce_mean(c, [1,2])
        y_ = tf.identity(f, name='predictions')

        # Define a cost function
        #tf.losses.add_loss(tf.losses.softmax_cross_entropy(y_oe, y_))
        #loss = tf.losses.softmax_cross_entropy(y_oe, y_)
        #self.train_step = tf.train.AdamOptimizer().minimize(loss)
        #print(c.shape)

        print("___________________ MyModel initialized")
Project: dataset    Author: analysiscenter    | project source | file source
def static_nn():
        input_images = tf.placeholder("uint8", [None, 28, 28, 1])
        input_labels = tf.placeholder("uint8", [None])

        input_vectors = tf.cast(tf.reshape(input_images, [-1, 28 * 28]), 'float')
        layer1 = tf.layers.dense(input_vectors, units=512, activation=tf.nn.relu)
        layer2 = tf.layers.dense(layer1, units=256, activation=tf.nn.relu)
        model_output = tf.layers.dense(layer2, units=10)
        encoded_labels = tf.one_hot(input_labels, depth=10)

        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=encoded_labels, logits=model_output))
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

        prediction = tf.argmax(model_output, 1)
        correct_prediction = tf.equal(prediction, tf.argmax(encoded_labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

        return [[input_images, input_labels], [optimizer, cost, accuracy]]
Project: deepmodels    Author: learningsociety    | project source | file source
def clf_loss_oneclass(pred_logits, gt_labels, cls_num):
  """Compute classification loss for oneclass problem.

  Args:
    pred_logits: logits prediction from a model.
    gt_labels: ground truth class labels.
    cls_num: number of classes.
  Returns:
    computed loss.
  """
  with tf.variable_scope("clf_loss"):
    tf.assert_equal(tf.reduce_max(gt_labels), tf.convert_to_tensor(cls_num))
    onehot_labels = tf.one_hot(gt_labels, cls_num)
    clf_loss_elem = tf.losses.softmax_cross_entropy(onehot_labels, pred_logits)
    mean_loss = tf.reduce_mean(clf_loss_elem, 0)
  return mean_loss
Project: deep_learning    Author: wecliqued    | project source | file source
def test_create_cell(self):
        seq2seq = self.seq2seq

        # we will use one hot encoding of the input batch, this is how it is constructed
        # we will use 0 for padding so our vocabulary size will increase by one
        vocab_len = len(seq2seq.vocab)
        depth = vocab_len + 1
        no_stacked_cells = self.no_stacked_cells
        hidden_size = self.hidden_size

        seq = tf.placeholder(dtype=tf.int32, shape=[None, None])
        one_hot_seq = tf.one_hot(seq, depth=depth)
        self.assertHasShape(one_hot_seq, [None, None, depth])

        # creates cell using seq as input batch placeholder
        cell, in_state = seq2seq._create_cell(one_hot_seq, no_stacked_cells)
        self.assertIsInstance(cell, tf.contrib.rnn.MultiRNNCell)
        self.assertEqual(len(in_state), no_stacked_cells)
        for state in in_state:
            self.assertHasShape(state, [None, hidden_size])

        # before calling __call__ on cell, internal variables are not created
        # not much we can test right now
        self.assertListEqual(tf.trainable_variables(), [])
Project: GPflow    Author: GPflow    | project source | file source
def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
        Y = tf.cast(Y, tf.int64)
        # work out what the mean and variance is of the indicated latent function.
        oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1., 0.), settings.float_type)
        mu_selected = tf.reduce_sum(oh_on * mu, 1)
        var_selected = tf.reduce_sum(oh_on * var, 1)

        # generate Gauss Hermite grid
        X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
            tf.sqrt(tf.clip_by_value(2. * var_selected, 1e-10, np.inf)), (-1, 1))

        # compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
        dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
            tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2)
        cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))

        cdfs = cdfs * (1 - 2e-4) + 1e-4

        # blank out all the distances on the selected latent function
        oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0., 1.), settings.float_type)
        cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)

        # take the product over the latent functions, and the sum over the GH grid.
        return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
Project: sparks    Author: ImpactHorizon    | project source | file source
def loss(logits, labels):
    labels = tf.cast(labels, tf.int64)  
    batch_size = logits.get_shape()[0].value  
    weights = tf.constant(batch_size*[H_FACTOR, T_FACTOR], tf.float32, 
                            shape=logits.get_shape())
    softmax = tf.nn.softmax(logits)
    softmax = tf.clip_by_value(softmax, 1e-10, 0.999999)

    with tf.device('/cpu:0'):
        targets = tf.one_hot(labels, depth=2)

    cross_entropy = -tf.reduce_mean(weights*targets*tf.log(softmax) + 
                                        weights*(1-targets)*tf.log(1-softmax), 
                                        reduction_indices=[1])    
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Project: dqn    Author: elix-tech    | project source | file source
def build_training_op(self, q_network_weights):
        a = tf.placeholder(tf.int64, [None])
        y = tf.placeholder(tf.float32, [None])

        # Convert action to one hot vector
        a_one_hot = tf.one_hot(a, self.num_actions, 1.0, 0.0)
        q_value = tf.reduce_sum(tf.multiply(self.q_values, a_one_hot), reduction_indices=1)

        # Clip the error, the loss is quadratic when the error is in (-1, 1), and linear outside of that region
        error = tf.abs(y - q_value)
        quadratic_part = tf.clip_by_value(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = tf.reduce_mean(0.5 * tf.square(quadratic_part) + linear_part)

        optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, momentum=MOMENTUM, epsilon=MIN_GRAD)
        grad_update = optimizer.minimize(loss, var_list=q_network_weights)

        return a, y, loss, grad_update
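The quadratic/linear split above implements a Huber-style loss with delta = 1; a small numeric sketch of the decomposition (the error values are assumed for illustration):

import tensorflow as tf

error = tf.constant([0.5, 2.0])
quadratic_part = tf.clip_by_value(error, 0.0, 1.0)     # [0.5, 1.0]
linear_part = error - quadratic_part                   # [0.0, 1.0]
loss = 0.5 * tf.square(quadratic_part) + linear_part   # [0.125, 1.5]

with tf.Session() as sess:
    print(sess.run(loss))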
Project: tefla    Author: openAGI    | project source | file source
def metric(self, predictions, targets, num_classes=None, batch_size=None, **kwargs):
        """
        Computes Kappa metric

        Args:
            predictions: 2D tensor/array, predictions of the network
            targets: 2D tensor/array, ground truth labels of the network
            num_classes: int, num_classes of the network
            batch_size: batch_size for predictions of the network

        Returns:
            Kappa score
        """
        if num_classes is None:
            num_classes = self.num_classes
        if batch_size is None:
            batch_size = self.batch_size
        targets = tf.convert_to_tensor(targets)
        predictions = tf.convert_to_tensor(predictions)
        if targets.get_shape().ndims == 1:
            targets = tf.one_hot(targets, num_classes, on_value=1, off_value=0)
        if predictions.get_shape().ndims == 1:
            predictions = tf.one_hot(
                predictions, num_classes, on_value=1, off_value=0)
        return self._kappa_loss(predictions, targets, batch_size=batch_size, num_ratings=num_classes, **kwargs)
Project: tefla    Author: openAGI    | project source | file source
def metric(self, predictions, targets, num_classes=5):
        """
        Computes auroc metric

        Args:
            predictions: 2D tensor/array, predictions of the network
            targets: 2D tensor/array, ground truth labels of the network
            num_classes: int, num_classes of the network

        Returns:
            auroc score
        """
        if targets.ndim == 2:
            targets = np.argmax(targets, axis=1)
        if predictions.ndim == 1:
            predictions = one_hot(predictions, m=num_classes)
        return self._auroc(predictions, targets)
Project: tefla    Author: openAGI    | project source | file source
def accuracy_op(predictions, targets, num_classes=5):
    """
    Computes accuracy metric

    Args:
        predictions: 2D tensor/array, predictions of the network
        targets: 2D tensor/array, ground truth labels of the network
        num_classes: int, num_classes of the network

    Returns:
        accuracy
    """
    with tf.name_scope('Accuracy'):
        if targets.ndim == 2:
            targets = np.argmax(targets, axis=1)
        if predictions.ndim == 1:
            predictions = one_hot(predictions, m=num_classes)
        acc = accuracy_score(targets, np.argmax(predictions, axis=1))
    return acc
Project: tefla    Author: openAGI    | project source | file source
def _sparse_loss_softmax(self, logits, labels, is_training, weighted=False):
        log.info('Using sparse softmax loss')
        labels = tf.cast(labels, tf.int64)
        if weighted:
            if labels.get_shape().ndims != 2:
                labels = tf.one_hot(labels, self.num_classes)
            weights = self._compute_weights(labels)
            weights = tf.reduce_max(tf.multiply(weights, labels), axis=1)
            ce_loss = tf.losses.sparse_softmax_cross_entropy(
                tf.argmax(labels, axis=1), logits=logits, weights=weights, scope='cross_entropy_loss')
        else:
            ce_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=logits, name='cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
Project: tefla    Author: openAGI    | project source | file source
def _loss_softmax(self, logits, labels, is_training, weighted=False):
        log.info('Using softmax loss')
        labels = tf.cast(labels, tf.int64)
        if labels.get_shape().ndims != 2:
            labels = tf.one_hot(labels, self.num_classes)
        if weighted:
            weights = self._compute_weights(labels)
            weights = tf.reduce_max(tf.multiply(weights, labels), axis=1)
            ce_loss = tf.losses.softmax_cross_entropy(
                labels, logits=logits, weights=weights, label_smoothing=self.label_smoothing, scope='cross_entropy_loss')
        else:
            ce_loss = tf.nn.softmax_cross_entropy_with_logits(
                labels=labels, logits=logits, name='cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
Project: tefla    Author: openAGI    | project source | file source
def _loss_sigmoid(self, logits, labels, is_training, weighted=False):
        log.info('Using sigmoid loss')
        labels = tf.cast(labels, tf.int64)
        if labels.get_shape().ndims != 2:
            labels = tf.one_hot(labels, self.num_classes)
        if weighted:
            weights = self._compute_weights(labels)
            ce_loss = tf.losses.sigmoid_cross_entropy(
                labels, logits=logits, weights=weights, label_smoothing=self.label_smoothing, scope='sigmoid_cross_entropy_loss')
        else:
            ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=labels, logits=logits, name='sigmoid_cross_entropy_loss')
        ce_loss_mean = tf.reduce_mean(ce_loss, name='sigmoid_cross_entropy')
        if is_training:
            tf.add_to_collection('losses', ce_loss_mean)

            l2_loss = tf.add_n(tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES))
            l2_loss = l2_loss * self.cnf.get('l2_reg', 0.0)
            tf.add_to_collection('losses', l2_loss)

            return tf.add_n(tf.get_collection('losses'), name='total_loss')
        else:
            return ce_loss_mean
Project: A3C    Author: go2sea    | project source | file source
def actor_loss(self):
        if self.config.mode == 'discrete':
            log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.action_input, self.action_dim, dtype=tf.float32),
                                     axis=1, keep_dims=True)
            # use entropy to encourage exploration
            exp_v = log_prob * self.TD_loss
            entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1, keep_dims=True)  # encourage exploration
            exp_v = self.config.ENTROPY_BETA * entropy + exp_v
            return tf.reduce_mean(-exp_v)  # minimize -exp_v, i.e. maximize the entropy-regularized, log_prob-weighted TD advantage
        elif self.config.mode == 'continuous':
            log_prob = self.action_normal_dist.log_prob(self.action_input)
            exp_v = log_prob * self.TD_loss
            # use entropy to encourage exploration
            exp_v = self.config.ENTROPY_BETA * self.action_normal_dist.entropy() + exp_v
            return tf.reduce_mean(-exp_v)
Project: youtube-8m    Author: wangheda    | project source | file source
def calculate_loss_mix(self, predictions, predictions_class, labels, **unused_params):
    with tf.name_scope("loss_mix"):
      float_labels = tf.cast(labels, tf.float32)
      if FLAGS.support_type=="class":
        seq = np.loadtxt(FLAGS.class_file)
        tf_seq = tf.one_hot(tf.constant(seq,dtype=tf.int32),FLAGS.encoder_size)
        float_classes_org = tf.matmul(float_labels,tf_seq)
        class_true = tf.ones(tf.shape(float_classes_org))
        class_false = tf.zeros(tf.shape(float_classes_org))
        float_classes = tf.where(tf.greater(float_classes_org, class_false), class_true, class_false)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="frequent":
        float_classes = float_labels[:,0:FLAGS.encoder_size]
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      elif FLAGS.support_type=="encoder":
        float_classes = float_labels
        for i in range(FLAGS.encoder_layers):
          var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
          weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
          bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
          float_classes = tf.nn.xw_plus_b(float_classes,weight_i,bias_i)
          if i<FLAGS.encoder_layers-1:
            float_classes = tf.nn.relu(float_classes)
          else:
            float_classes = tf.nn.sigmoid(float_classes)
            #float_classes = tf.nn.relu(tf.sign(float_classes - 0.5))
        cross_entropy_class = self.calculate_mseloss(predictions_class,float_classes)
      else:
        float_classes = float_labels
        for i in range(FLAGS.moe_layers-1):
          float_classes = tf.concat((float_classes,float_labels),axis=1)
        cross_entropy_class = self.calculate_loss(predictions_class,float_classes)
      cross_entropy_loss = self.calculate_loss(predictions,labels)
      return cross_entropy_loss + 0.1*cross_entropy_class
Project: youtube-8m    Author: wangheda    | project source | file source
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
        """Creates a logistic model.

        Args:
          model_input: 'batch' x 'num_features' matrix of input features.
          vocab_size: The number of classes in the dataset.

        Returns:
          A dictionary with a tensor containing the probability predictions of the
          model in the 'predictions' key. The dimensions of the tensor are
          batch_size x num_classes."""
        model_input = tf.cast(model_input,dtype=tf.float32)
        hidden_size = FLAGS.hidden_size

        model_mask, indices_input = tf.nn.top_k(model_input, k=FLAGS.top_k)
        indices_input = tf.reshape(indices_input, [-1])
        models_mask = tf.reshape(model_mask, [-1,FLAGS.top_k,1])
        with tf.name_scope("embedding"):
            embeddings = tf.Variable(
                tf.random_uniform([vocab_size, hidden_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, indices_input)
            output = slim.fully_connected(
                embed,
                vocab_size,
                activation_fn=tf.nn.sigmoid,
                weights_regularizer=slim.l2_regularizer(l2_penalty),
                scope="output")
        indices_one_hot = tf.one_hot(indices_input, vocab_size)
        output = output * (1 - indices_one_hot) + indices_one_hot
        output_val = tf.reshape(output,[-1,FLAGS.top_k,vocab_size])
        predictions_val = tf.reduce_sum(output_val*models_mask, axis=1)/tf.reduce_sum(models_mask, axis=1)
        return {"predictions": output, "predictions_val": predictions_val}
Project: human-rl    Author: gsastry    | project source | file source
def categorical_sample(logits, d):
    value = tf.squeeze(tf.multinomial(logits - tf.reduce_max(logits, [1], keep_dims=True), 1), [1])
    return tf.one_hot(value, d)
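Subtracting the per-row maximum before tf.multinomial leaves the categorical distribution unchanged (softmax is shift-invariant) but avoids overflow on large logits; a minimal sketch:

import tensorflow as tf

logits = tf.constant([[1000.0, 1001.0]])                       # would overflow exp()
stable = logits - tf.reduce_max(logits, [1], keep_dims=True)   # [[-1., 0.]]
sample = tf.one_hot(tf.squeeze(tf.multinomial(stable, 1), [1]), 2)

with tf.Session() as sess:
    print(sess.run(sample))   # [[0. 1.]] about 73% of the time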
Project: human-rl    Author: gsastry    | project source | file source
def categorical_max(logits, d):
    value = tf.argmax(logits - tf.reduce_max(logits, [1], keep_dims=True), axis=1)
    return tf.one_hot(value, d)
Project: ICGan-tensorflow    Author: zhangqianhui    | project source | file source
def encode_y(self, x, weights, biases):

        c1 = tf.nn.relu(batch_normal(conv2d(x, weights['e1'], biases['eb1']), scope='eny_bn1'))

        c2 = tf.nn.relu(batch_normal(conv2d(c1, weights['e2'], biases['eb2']), scope='eny_bn2'))

        c2 = tf.reshape(c2, [self.batch_size, 128 * 7 * 7])

        result_y = tf.nn.sigmoid(fully_connect(c2, weights['e3'], biases['eb3']))

        #y_vec = tf.one_hot(tf.arg_max(result_y, 1), 10)

        return result_y
Project: seq2seq    Author: google    | project source | file source
def mask_probs(probs, eos_token, finished):
  """Masks log probabilities such that finished beams
  allocate all probability mass to eos. Unfinished beams remain unchanged.

  Args:
    probs: Log probabilities of shape `[beam_width, vocab_size]`
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to
    finished: A boolean tensor of shape `[beam_width]` that specifies which
      elements in the beam are finished already.

  Returns:
    A tensor of shape `[beam_width, vocab_size]`, where unfinished beams
    stay unchanged and finished beams are replaced with a tensor that has all
    probability on the EOS token.
  """
  vocab_size = tf.shape(probs)[1]
  finished_mask = tf.expand_dims(tf.to_float(1. - tf.to_float(finished)), 1)
  # These examples are not finished and we leave them
  non_finished_examples = finished_mask * probs
  # All finished examples are replaced with a vector that has all
  # probability on EOS
  finished_row = tf.one_hot(
      eos_token,
      vocab_size,
      dtype=tf.float32,
      on_value=0.,
      off_value=tf.float32.min)
  finished_examples = (1. - finished_mask) * finished_row
  return finished_examples + non_finished_examples
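A quick numeric check of the masking behaviour, assuming the mask_probs above is in scope (beam_width=2, vocab_size=3, and eos_token=2 are chosen for illustration):

import tensorflow as tf

probs = tf.constant([[-1.0, -2.0, -3.0],
                     [-1.0, -2.0, -3.0]])
finished = tf.constant([False, True])
masked = mask_probs(probs, eos_token=2, finished=finished)

with tf.Session() as sess:
    print(sess.run(masked))
    # row 0 is unchanged; row 1 becomes [float32.min, float32.min, 0.],
    # i.e. all probability mass sits on the EOS token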