Python tensorflow module: equal() code examples

From open-source Python projects, we extracted the following 49 code examples illustrating how to use tensorflow.equal().
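
Before the project examples, a minimal sketch of the basic semantics (the tensors here are illustrative, not taken from any project below): tf.equal compares two tensors elementwise, with broadcasting, and returns a bool tensor, which is typically passed through tf.cast and tf.reduce_mean to turn matches into an accuracy.

import tensorflow as tf

a = tf.constant([1, 2, 3, 4])
b = tf.constant([1, 0, 3, 0])

eq = tf.equal(a, b)                             # elementwise -> [True, False, True, False]
frac = tf.reduce_mean(tf.cast(eq, tf.float32))  # fraction of matches -> 0.5

with tf.Session() as sess:
    print(sess.run([eq, frac]))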

Project: handson-tensorflow    Author: winnietsang    | project source | file source
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Placeholder that will be fed image data.
    x = tf.placeholder(tf.float32, [None, 784])
    # Placeholder that will be fed the correct labels.
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Define weight and bias.
    W = weight_variable([784, 10])
    b = bias_variable([10])

    # Here we define our model which utilizes the softmax regression.
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # Define our loss.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Define our optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Define accuracy.
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)
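
The snippet above stops once the accuracy op is defined; a minimal sketch of running it in a session (reusing the mnist, x, y_, train_step and accuracy names from the example above):

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        # accuracy is the mean of the tf.equal comparison cast to float32
        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))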
Project: lung-cancer-detector    Author: YichenGong    | project source | file source
def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
        tf.reset_default_graph()

        self.n_class = n_class
        self.summaries = kwargs.get("summaries", True)

        self.x = tf.placeholder("float", shape=[None, None, None, channels])
        self.y = tf.placeholder("float", shape=[None, None, None, n_class])
        self.keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

        logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)

        self.cost = self._get_cost(logits, cost, cost_kwargs)

        self.gradients_node = tf.gradients(self.cost, self.variables)

        self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                          tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

        self.predicter = pixel_wise_softmax_2(logits)
        self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
Project: yoctol-keras-layer-zoo    Author: Yoctol    | project source | file source
def step_with_training(self, training=None):

        def step(inputs, states):
            input_shape = K.int_shape(inputs)
            y_tm1 = self.layer.preprocess_input(
                K.expand_dims(states[0], axis=1),
                training
            )
            y_tm1 = K.reshape(y_tm1, (-1, input_shape[-1]))

            inputs_sum = tf.reduce_sum(inputs)

            def inputs_f(): return inputs
            def output_f(): return y_tm1
            current_inputs = tf.case(
                [(tf.equal(inputs_sum, 0.0), output_f)],
                default=inputs_f
            )

            return self.layer.step(
                current_inputs,
                states
            )

        return step
Project: deligan    Author: val-iisc    | project source | file source
def add_evaluation_step(graph, final_tensor_name, ground_truth_tensor_name):
    """Inserts the operations we need to evaluate the accuracy of our results.
    Args:
      graph: Container for the existing model's Graph.
      final_tensor_name: Name string for the new final node that produces results.
      ground_truth_tensor_name: Name string for the node we feed ground truth data
      into.
    Returns:
      The accuracy evaluation op.
    """
    result_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        final_tensor_name))
    ground_truth_tensor = graph.get_tensor_by_name(ensure_name_has_port(
        ground_truth_tensor_name))
    correct_prediction = tf.equal(
        tf.argmax(result_tensor, 1), tf.argmax(ground_truth_tensor, 1))
    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    return evaluation_step
Project: tfplus    Author: renmengye    | project source | file source
def build_loss(self, inp, output):
        y_gt = inp['y_gt']
        y_out = output['y_out']
        ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
        num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
        ce = tf.reduce_sum(ce) / num_ex_f
        self.add_loss(ce)
        total_loss = self.get_loss()
        self.register_var('loss', total_loss)

        ans = tf.argmax(y_gt, 1)
        correct = tf.equal(ans, tf.argmax(y_out, 1))
        top5_acc = tf.reduce_sum(tf.to_float(
            tf.nn.in_top_k(y_out, ans, 5))) / num_ex_f
        self.register_var('top5_acc', top5_acc)
        acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
        self.register_var('acc', acc)
        return total_loss
Project: tfplus    Author: renmengye    | project source | file source
def build_loss_grad(self, inp, output):
        y_gt = inp['y_gt']
        y_out = output['y_out']
        ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
        num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
        ce = tf.reduce_sum(ce) / num_ex_f
        self.add_loss(ce)
        learn_rate = self.get_option('learn_rate')
        total_loss = self.get_loss()
        self.register_var('loss', total_loss)
        eps = self.get_option('adam_eps')
        optimizer = tf.train.AdamOptimizer(learn_rate, epsilon=eps)
        global_step = tf.Variable(0.0)
        self.register_var('step', global_step)
        train_step = optimizer.minimize(
            total_loss, global_step=global_step)
        self.register_var('train_step', train_step)
        correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
        acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
        self.register_var('acc', acc)
        pass
Project: photinia    Author: XoriieInpottn    | project source | file source
def _build(self):
        # Build the linear layer.
        self._lin = photinia.Linear('LINEAR', self._input_size, self._num_classes).build()
        # Placeholders for the input and the labels.
        x = tf.placeholder(dtype=photinia.D_TYPE, shape=[None, self._input_size])
        y_ = tf.placeholder(dtype=photinia.D_TYPE, shape=[None, self._num_classes])
        # Set up the model output.
        y = self._lin.setup(x)
        # Loss: softmax cross entropy.
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
        # Accuracy.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, photinia.D_TYPE))
        # Add the train and predict slots.
        self._add_slot(
            'train',
            outputs=loss,
            inputs=(x, y_),
            updates=tf.train.GradientDescentOptimizer(0.5).minimize(loss)
        )
        self._add_slot(
            'predict',
            outputs=accuracy,
            inputs=(x, y_)
        )
Project: keras-image-captioning    Author: danieljl    | project source | file source
def categorical_accuracy_with_variable_timestep(y_true, y_pred):
    # Discarding the last timestep is not strictly needed, since the dummy is
    # encoded as an all-zeros array by CaptionPreprocessing.preprocess_batch
    # and would be removed by the all-zeros row mask below anyway.
    y_true = y_true[:, :-1, :]  # Discard the last timestep/word (dummy)
    y_pred = y_pred[:, :-1, :]  # Discard the last timestep/word (dummy)

    # Flatten the timestep dimension
    shape = tf.shape(y_true)
    y_true = tf.reshape(y_true, [-1, shape[-1]])
    y_pred = tf.reshape(y_pred, [-1, shape[-1]])

    # Discard rows that are all zeros as they represent dummy or padding words.
    is_zero_y_true = tf.equal(y_true, 0)
    is_zero_row_y_true = tf.reduce_all(is_zero_y_true, axis=-1)
    y_true = tf.boolean_mask(y_true, ~is_zero_row_y_true)
    y_pred = tf.boolean_mask(y_pred, ~is_zero_row_y_true)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_true, axis=1),
                                               tf.argmax(y_pred, axis=1)),
                                      dtype=tf.float32))
    return accuracy


# As Keras stores a function's name as its metric's name
Project: gae    Author: tkipf    | project source | file source
def __init__(self, preds, labels, model, num_nodes, pos_weight, norm):
        preds_sub = preds
        labels_sub = labels

        self.cost = norm * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)  # Adam Optimizer

        # Latent loss
        self.log_lik = self.cost
        self.kl = (0.5 / num_nodes) * tf.reduce_mean(tf.reduce_sum(1 + 2 * model.z_log_std - tf.square(model.z_mean) -
                                                                   tf.square(tf.exp(model.z_log_std)), 1))
        self.cost -= self.kl

        self.opt_op = self.optimizer.minimize(self.cost)
        self.grads_vars = self.optimizer.compute_gradients(self.cost)

        self.correct_prediction = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub), 0.5), tf.int32),
                                           tf.cast(labels_sub, tf.int32))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
Project: unsupervised-2017-cvprw    Author: imatge-upc    | project source | file source
def generate_mask(img_mask_list, h, w, l):
    img_masks, loss_masks = [], []

    for i in range(l):
        # generate image mask
        img_mask = img_mask_list[i]
        img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
        img_mask = tf.reshape(img_mask, (h, w))
        img_masks.append(img_mask)

        # generate loss mask
        s_total   = h * w
        s_mask    = tf.reduce_sum(img_mask)
        def f1(): return img_mask*((s_total-s_mask)/s_mask-1)+1
        def f2(): return tf.zeros_like(img_mask)
        def f3(): return tf.ones_like(img_mask)
        loss_mask = tf.case([(tf.equal(s_mask, 0), f2), \
                             (tf.less(s_mask, s_total/2), f1)],
                             default=f3)

        loss_masks.append(loss_mask)

    return tf.stack(img_masks), tf.stack(loss_masks)
Project: tf_classification    Author: visipedia    | project source | file source
def _largest_size_at_most(height, width, largest_side):
  """Computes new shape with the largest side equal to `largest_side`.
  Computes new shape with the largest side equal to `largest_side` while
  preserving the original aspect ratio.
  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    largest_side: A python integer or scalar `Tensor` indicating the size of
      the largest side after resize.
  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  largest_side = tf.convert_to_tensor(largest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  largest_side = tf.to_float(largest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: largest_side / height,
                  lambda: largest_side / width)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
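
A hypothetical usage sketch for the helper above (the file name and target size are illustrative): resize an image so that its longer side becomes exactly 512 pixels while preserving the aspect ratio.

image = tf.image.decode_jpeg(tf.read_file('input.jpg'), channels=3)
shape = tf.shape(image)
new_height, new_width = _largest_size_at_most(shape[0], shape[1], largest_side=512)
resized = tf.image.resize_images(image, [new_height, new_width])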
Project: fold    Author: tensorflow    | project source | file source
def __init__(self, embedding_length):
    self._calculator_loom = CalculatorLoom(embedding_length)

    self._labels_placeholder = tf.placeholder(tf.float32)
    self._classifier_weights = tf.Variable(
        tf.truncated_normal([embedding_length, 3],
                            dtype=tf.float32,
                            stddev=1),
        name='classifier_weights')

    self._output_weights = tf.matmul(
        self._calculator_loom.output(), self._classifier_weights)
    self._loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=self._output_weights, labels=self._labels_placeholder))

    self._true_labels = tf.argmax(self._labels_placeholder, dimension=1)
    self._prediction = tf.argmax(self._output_weights, dimension=1)

    self._accuracy = tf.reduce_mean(tf.cast(
        tf.equal(self._true_labels, self._prediction),
        dtype=tf.float32))
Project: tensorflow-prebuilt-classifier    Author: recursionbane    | project source | file source
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper    | project source | file source
def train_neural_network(x):
    prediction = convolutional_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c

            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))

        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
Project: isbi2017-part3    Author: learningtitans    | project source | file source
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)])[0]
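
A typical (hypothetical) call to the helper above picks one of several photometric distortions at random per example, assuming image is a float32 image tensor; tf.equal(sel, case) routes the real input to exactly one branch:

distorted = apply_with_random_selector(
    image,
    lambda x, ordering: tf.image.random_brightness(x, max_delta=32. / 255.)
    if ordering == 0 else tf.image.random_saturation(x, lower=0.5, upper=1.5),
    num_cases=2)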
Project: isbi2017-part3    Author: learningtitans    | project source | file source
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)])[0]
Project: blitznet    Author: dvornikita    | project source | file source
def apply_with_random_selector(x, func, num_cases):
    """Computes func(x, sel), with sel sampled from [0...num_cases-1].

    Args:
        x: input Tensor.
        func: Python function to apply.
        num_cases: Python int32, number of cases to sample sel from.

    Returns:
        The result of func(x, sel), where func receives the value of the
        selector as a python integer, but sel is sampled dynamically.
    """
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    # Pass the real x only to one of the func calls.
    return control_flow_ops.merge([
        func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
        for case in range(num_cases)])[0]
Project: image_recognition    Author: tue-robotics    | project source | file source
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    The accuracy evaluation op.
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
        tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step
Project: zhusuan    Author: thu-ml    | project source | file source
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` have the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # There is a TensorFlow bug where static shape inference is not done
    # correctly inside nested tf.cond()'s, so instead of comparing the shapes
    # of x and y directly we work with their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
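
A quick illustration of the helper (the placeholders and feeds are illustrative):

import numpy as np

x = tf.placeholder(tf.float32, [None, 3])
y = tf.placeholder(tf.float32, [None, None])
same = is_same_dynamic_shape(x, y)

with tf.Session() as sess:
    print(sess.run(same, {x: np.zeros((2, 3)), y: np.zeros((2, 3))}))  # True
    print(sess.run(same, {x: np.zeros((2, 3)), y: np.zeros((4, 5))}))  # False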
Project: tensorflow-image-classifier    Author: burliEnterprises    | project source | file source
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction
Project: PixelDCN    Author: HongyangGao    | project source | file source
def cal_loss(self):
        one_hot_labels = tf.one_hot(
            self.labels, depth=self.conf.class_num,
            axis=self.channel_axis, name='labels/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_labels, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_preds = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        correct_prediction = tf.equal(
            self.labels, self.decoded_preds,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
        # weights = tf.cast(
        #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
        #     tf.int32, name='m_iou/weights')
        weights = tf.cast(
            tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
            tf.int64, name='m_iou/weights')
        labels = tf.multiply(self.labels, weights, name='m_iou/mul')
        self.m_iou, self.miou_op = tf.metrics.mean_iou(
            self.labels, self.decoded_preds, self.conf.class_num,
            weights, name='m_iou/m_ious')
Project: hyperchamber    Author: 255BITS    | project source | file source
def create(config):
    batch_size = config["batch_size"]
    x = tf.placeholder(tf.float32, [batch_size, X_DIMS[0], X_DIMS[1], 1], name="x")
    y = tf.placeholder(tf.float32, [batch_size, Y_DIMS], name="y")

    hidden = hidden_layers(config, x)
    output = output_layer(config, hidden)

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y), name="loss")

    output = tf.nn.softmax(output)
    correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    variables = tf.trainable_variables()

    optimizer = tf.train.GradientDescentOptimizer(config['learning_rate']).minimize(loss)


    set_tensor("x", x)
    set_tensor("y", y)
    set_tensor("loss", loss)
    set_tensor("optimizer", optimizer)
    set_tensor("accuracy", accuracy)
Project: tensorflow-adversarial    Author: gongzhitaao    | project source | file source
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        dx = - y * g / tf.norm(g)
        return i+1, z+dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
Project: oversight    Author: hebenon    | project source | file source
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    The accuracy evaluation op.
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
        tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', evaluation_step)
  return evaluation_step
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995    | project source | file source
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0,tf.float32)
    misclassnum = tf.constant(0,tf.float32)
    class_num   = tf.constant(class_nums,tf.float32) 
    sublist   =  []
    for index in range(1,class_nums-2):
        current_annotation   =     tf.cast(tf.equal(tf.ones_like(annotations)*index,\
                                        annotations),tf.float32)
        cureent_prediction   =     tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
                                            decoded_predictions),tf.float32)
        Overlap              =     tf.add(current_annotation,cureent_prediction)
        Common               =     tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2,Overlap),\
                                                            tf.float32),[0,1,2,3])
        annotation_num       =     tf.reduce_sum(current_annotation,[0,1,2,3])
        predict_num          =     tf.reduce_sum(cureent_prediction,[0,1,2,3])
        all_num              =     tf.add(annotation_num,predict_num)
        Sub_DiceRatio        =     Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum          =     tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio            =     DiceRatio + Sub_DiceRatio
    DiceRatio                =     DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
    return DiceRatio, sublist
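
For reference, the per-class quantity computed above is the Dice coefficient Dice(A, B) = 2|A ∩ B| / (|A| + |B|): Common counts the positions where annotation and prediction overlap (both one-hot maps are 1, so their sum equals 2), all_num is |A| + |B|, and the clip_by_value calls only guard against division by zero.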
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995    | project source | file source
def dice_accuracy(decoded_predictions, annotations, class_nums):
    DiceRatio = tf.constant(0,tf.float32)
    misclassnum = tf.constant(0,tf.float32)
    class_num   = tf.constant(class_nums,tf.float32) 
    sublist   =  []
    for index in range(1,class_nums-2):
        current_annotation   =     tf.cast(tf.equal(tf.ones_like(annotations)*index,\
                                        annotations),tf.float32)
        cureent_prediction   =     tf.cast(tf.equal(tf.ones_like(decoded_predictions)*index,\
                                            decoded_predictions),tf.float32)
        Overlap              =     tf.add(current_annotation,cureent_prediction)
        Common               =     tf.reduce_sum(tf.cast(tf.equal(tf.ones_like(Overlap)*2,Overlap),\
                                                            tf.float32),[0,1,2,3])
        annotation_num       =     tf.reduce_sum(current_annotation,[0,1,2,3])
        predict_num          =     tf.reduce_sum(cureent_prediction,[0,1,2,3])
        all_num              =     tf.add(annotation_num,predict_num)
        Sub_DiceRatio        =     Common*2/tf.clip_by_value(all_num, 1e-10, 1e+10)
        misclassnum          =     tf.cond(tf.equal(Sub_DiceRatio,0.0), lambda: misclassnum + 1, lambda: misclassnum)
        sublist.append(Sub_DiceRatio)
        DiceRatio            =     DiceRatio + Sub_DiceRatio
        del Sub_DiceRatio
    DiceRatio                =     DiceRatio/tf.clip_by_value(tf.cast((class_num-misclassnum-3),tf.float32),1e-10,1e+1000)
    return DiceRatio, sublist
Project: 3D_Dense_Transformer_Networks    Author: JohnYC1995    | project source | file source
def cal_loss(self):
        expand_annotations = tf.expand_dims(
            self.annotations, -1, name='annotations/expand_dims')
        one_hot_annotations = tf.squeeze(
            expand_annotations, axis=[self.channel_axis],
            name='annotations/squeeze')
        one_hot_annotations = tf.one_hot(
            one_hot_annotations, depth=self.conf.class_num,
            axis=self.channel_axis, name='annotations/one_hot')
        losses = tf.losses.softmax_cross_entropy(
            one_hot_annotations, self.predictions, scope='loss/losses')
        self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
        self.decoded_predictions = tf.argmax(
            self.predictions, self.channel_axis, name='accuracy/decode_pred')
        self.dice_accuracy_op, self.sub_dice_list = ops.dice_accuracy(self.decoded_predictions,\
                                self.annotations,self.conf.class_num)
        correct_prediction = tf.equal(
            self.annotations, self.decoded_predictions,
            name='accuracy/correct_pred')
        self.accuracy_op = tf.reduce_mean(
            tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
            name='accuracy/accuracy_op')
Project: good-semi-bad-gan    Author: christiancosgrove    | project source | file source
def plot(samples):
    width = min(12,int(np.sqrt(len(samples))))
    fig = plt.figure(figsize=(width, width))
    gs = gridspec.GridSpec(width, width)
    gs.update(wspace=0.05, hspace=0.05)

    for ind, sample in enumerate(samples):
        if ind >= width*width:
            break
        ax = plt.subplot(gs[ind])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        sample = sample * 0.5 + 0.5
        sample = np.transpose(sample, (1, 2, 0))
        plt.imshow(sample)

    return fig
Project: X-ray-classification    Author: bendidi    | project source | file source
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].
  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.
  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge([
      func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
      for case in range(num_cases)])[0]
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def _maybe_tensor_gather_helper(gather_indices, gather_from, batch_size,
                                range_size, gather_shape):
    """Maybe applies _tensor_gather_helper.
    This applies _tensor_gather_helper when gather_from has at least as many
    dimensions as the length of gather_shape. It is used in conjunction with nest
    so that we don't apply _tensor_gather_helper to inapplicable values like scalars.
    Args:
      gather_indices: The tensor indices that we use to gather.
      gather_from: The tensor that we are gathering from.
      batch_size: The batch size.
      range_size: The number of values in each range. Likely equal to beam_width.
      gather_shape: What we should reshape gather_from to in order to preserve the
        correct values. An example is when gather_from is the attention from an
        AttentionWrapperState with shape [batch_size, beam_width, attention_size].
        There, we want to preserve the attention_size elements, so gather_shape is
        [batch_size * beam_width, -1]. Then, upon reshape, we still have the
        attention_size as desired.
    Returns:
      output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
        or the original tensor if its dimensions are too small.
    """
    if gather_from.shape.ndims >= len(gather_shape):
        return _tensor_gather_helper(
            gather_indices=gather_indices,
            gather_from=gather_from,
            batch_size=batch_size,
            range_size=range_size,
            gather_shape=gather_shape)
    else:
        return gather_from
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
                          range_size, gather_shape):
    """Helper for gathering the right indices from the tensor.
    This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
    gathering from that according to the gather_indices, which are offset by
    the right amounts in order to preserve the batch order.
    Args:
      gather_indices: The tensor indices that we use to gather.
      gather_from: The tensor that we are gathering from.
      batch_size: The input batch size.
      range_size: The number of values in each range. Likely equal to beam_width.
      gather_shape: What we should reshape gather_from to in order to preserve the
        correct values. An example is when gather_from is the attention from an
        AttentionWrapperState with shape [batch_size, beam_width, attention_size].
        There, we want to preserve the attention_size elements, so gather_shape is
        [batch_size * beam_width, -1]. Then, upon reshape, we still have the
        attention_size as desired.
    Returns:
      output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
    """
    range_ = tf.expand_dims(tf.range(batch_size) * range_size, 1)
    gather_indices = tf.reshape(gather_indices + range_, [-1])
    output = tf.gather(tf.reshape(gather_from, gather_shape), gather_indices)
    final_shape = tf.shape(gather_from)[:1 + len(gather_shape)]
    final_static_shape = (tf.TensorShape([None]).concatenate(gather_from.shape[1:1 + len(gather_shape)]))
    output = tf.reshape(output, final_shape)
    output.set_shape(final_static_shape)
    return output
Project: youtube-8m    Author: wangheda    | project source | file source
def build_graph(input_reader, input_data_pattern,
                prediction_reader, prediction_data_pattern,
                batch_size=256):
  """Creates the Tensorflow graph for evaluation.

  Args:
    input_reader: The data file reader for model inputs. It should inherit
                  from BaseReader.
    input_data_pattern: glob path to the input data files.
    prediction_reader: The data file reader for stored model predictions.
    prediction_data_pattern: glob path to the prediction data files.
    batch_size: How many examples to process at a time.
  """

  video_ids_batch, model_inputs_batch, labels_batch, unused_num_frames = (
      get_input_data_tensors(
          input_reader,
          input_data_pattern,
          batch_size=batch_size))
  video_ids_batch2, model_predictions_batch, labels_batch2, unused_num_frames2 = (
      get_input_data_tensors(
          prediction_reader,
          prediction_data_pattern,
          batch_size=batch_size))

  video_ids_equal = tf.reduce_mean(tf.cast(tf.equal(video_ids_batch, video_ids_batch2), tf.float32))
  labels_equal = tf.reduce_mean(tf.reduce_sum(tf.cast(tf.equal(labels_batch, labels_batch2), tf.float32), axis=1))

  tf.add_to_collection("video_ids_equal", video_ids_equal)
  tf.add_to_collection("labels_equal", labels_equal)
  tf.add_to_collection("video_ids_batch", video_ids_batch)
  tf.add_to_collection("labels_batch", tf.cast(labels_batch, tf.float32))
  tf.add_to_collection("inputs_batch", model_inputs_batch)
  tf.add_to_collection("predictions_batch", model_predictions_batch)
Project: TensorFlow-World    Author: astorfi    | project source | file source
def inference_fn(W,b,x_data,y_target):
    prediction = tf.sign(tf.subtract(tf.matmul(x_data, W), b))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
    return accuracy
Project: US-image-prediction    Author: ChengruiWu008    | project source | file source
def correct_prediction(correct_pred=0):
    for i in range(PIXEL):
        correct_prediction = tf.equal(tf.argmax(conv_5[0][i], 1), tf.argmax(y[i], 1))
        correct_pred += correct_prediction
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return accuracy
Project: scientific-paper-summarisation    Author: EdCo95    | project source | file source
def bilstm_reader(placeholders, vocab_size, emb_dim, drop_keep_prob=1.0):
    # [batch_size, max_seq_length]
    sentences = placeholders['sentences']

    # [batch_size, candidate_size]
    targets = tf.to_float(placeholders['sentence_labels'])

    with tf.variable_scope("embeddings"):
        embeddings = tf.get_variable("word_embeddings", [vocab_size, emb_dim], dtype=tf.float32)

    with tf.variable_scope("embedders") as varscope:
        sentences_embedded = tf.nn.embedding_lookup(embeddings, sentences)

    with tf.variable_scope("bilstm_reader") as varscope1:

        # states: (c_fw, h_fw), (c_bw, h_bw)
        outputs, states = reader(sentences_embedded, placeholders['sentences_lengths'], emb_dim,
                                scope=varscope1, drop_keep_prob=drop_keep_prob)

        # concat fw and bw outputs
        output = tf.concat(1, [states[0][1], states[1][1]])

    scores = tf.contrib.layers.linear(output, 2)  # we don't strictly need this as we've only got 2 targets
    # add non-linearity
    scores = tf.nn.tanh(scores)
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=targets)
    predict = tf.nn.softmax(scores)

    predictions = tf.argmax(predict, axis=1)
    true_vals = tf.argmax(targets, axis=1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, true_vals), tf.float32))

    saver = tf.train.Saver()

    return scores, loss, predict, accuracy, saver
Project: scientific-paper-summarisation    Author: EdCo95    | project source | file source
def graph():

    # Define placeholders for the data

    # The sentence to classify, has shape [batch_size x word_dimensions]
    sentence_input = tf.placeholder(tf.float32, shape=[None, NUM_FEATURES])

    # The labels for the sentences as one-hot vectors, of the form [batch_size x num_classes]
    labels = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

    # Define the computation graph

    # Linear layer
    sent_weight = weight_variable([NUM_FEATURES, NUM_CLASSES])
    sent_bias = bias_variable([NUM_CLASSES])

    output = tf.matmul(sentence_input, sent_weight) + sent_bias

    # Define the loss function
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=labels))
    opt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    # Predictions
    predictions = tf.nn.softmax(output)

    # Calculate accuracy
    pred_answers = tf.argmax(output, axis=1)
    correct_answers = tf.argmax(labels, axis=1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_answers, correct_answers), tf.float32))

    return sentence_input, labels, loss, opt, predictions, pred_answers, correct_answers, accuracy, sent_weight
Project: scientific-paper-summarisation    Author: EdCo95    | project source | file source
def graph():
    """
    Function to encapsulate the construction of a TensorFlow computation graph.
    :return: input placeholders, optimisation operation, loss, accuracy, prediction operations
    """

    # Define placeholders for the data

    # The sentence to classify, has shape [batch_size x word_dimensions*2] because the input will be the sentence
    # and abstract concatenated.
    sentence_input = tf.placeholder(tf.float32, shape=[None, WORD_DIMENSIONS + ABSTRACT_DIMENSION + NUM_FEATURES])

    # The labels for the sentences as one-hot vectors, of the form [batch_size x num_classes]
    labels = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

    # Define the computation graph

    # The keep gate - decides which parts to keep
    keep_weight = weight_variable([WORD_DIMENSIONS + ABSTRACT_DIMENSION + NUM_FEATURES, NUM_CLASSES])
    keep_bias = bias_variable([NUM_CLASSES])
    output = tf.matmul(sentence_input, keep_weight) + keep_bias

    # Define the loss function
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=labels))
    opt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    # Predictions
    predictions = tf.nn.softmax(output)

    # Calculate accuracy
    pred_answers = tf.argmax(output, axis=1)
    correct_answers = tf.argmax(labels, axis=1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_answers, correct_answers), tf.float32))

    return sentence_input, labels, loss, opt, predictions, pred_answers, correct_answers, accuracy

# Construct the computation graph
Project: kaggle-review    Author: daxiongshu    | project source | file source
def _get_loss(self,labels):
        # build the self.loss tensor
        # This function could be overwritten

        #print("pred {} label{}".format(self.logit.dtype,labels.dtype))

        with tf.name_scope("Loss"):
            with tf.name_scope("cross_entropy"):
                labels = tf.cast(labels, tf.float32)
                #self.logit = tf.cast(self.logit, tf.float32)
                self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logit, labels=labels))
            self._get_l2_loss()
            with tf.name_scope("accuracy"):
                y_label = tf.argmax(labels, 1)
                yp_label = tf.argmax(self.logit, 1)
                correct_pred = tf.equal(yp_label,y_label)
                self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        with tf.name_scope("summary"):
            if self.flags.visualize:
                tf.summary.scalar(name='TRAIN_CrossEntropy', tensor=self.loss, collections=[tf.GraphKeys.SCALARS])
                tf.summary.scalar(name='TRAIN_Accuracy', tensor=self.acc, collections=[tf.GraphKeys.SCALARS])

                tf.summary.scalar(name='TRAIN_L2loss', tensor=self.l2loss, collections=[tf.GraphKeys.SCALARS])

                if 'acc' in self.flags.visualize:
                    tf.summary.histogram(name='pred', values=yp_label, collections=[tf.GraphKeys.FEATURE_MAPS])
                    tf.summary.histogram(name='truth', values=y_label, collections=[tf.GraphKeys.FEATURE_MAPS])
                    for cl in range(self.flags.classes):
                        tf.summary.histogram(name='pred%d'%cl, values=tf.slice(self.logit, [0,cl],[self.flags.batch_size, 1]), collections=[tf.GraphKeys.FEATURE_MAPS])
Project: text_classification    Author: brightmart    | project source | file source
def __init__(self,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,
                 vocab_size,embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.hidden_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.initializer=initializer

        # add placeholder (X,label)
        #X:input_x e.g. "how much is the computer? EOS price of laptop"
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X: concat of two sentences, split by EOS.
        self.input_y = tf.placeholder(tf.int32,[None], name="input_y")  # y [None,num_classes]
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
Project: text_classification    Author: brightmart    | project source | file source
def __init__(self, num_classes, learning_rate, batch_size, decay_steps, decay_rate, sequence_length,
                 vocab_size, embed_size,d_model,d_k,d_v,h,num_layer,is_training,
                 initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,l2_lambda=0.0001,use_residual_conn=False):
        """init all hyperparameter here"""
        super(Transformer, self).__init__(d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=num_layer) #init some fields by using parent class.

        self.num_classes = num_classes
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_size = d_model
        self.learning_rate = tf.Variable(learning_rate, trainable=False, name="learning_rate")
        self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5)
        self.initializer = initializer
        self.clip_gradients=clip_gradients
        self.l2_lambda=l2_lambda

        self.is_training=is_training #self.is_training=tf.placeholder(tf.bool,name="is_training") #tf.bool #is_training
        self.input_x = tf.placeholder(tf.int32, [self.batch_size, self.sequence_length], name="input_x")                 #x  batch_size
        self.input_y_label = tf.placeholder(tf.int32, [self.batch_size], name="input_y_label")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step = tf.Variable(0, trainable=False, name="Epoch_Step")
        self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate
        self.use_residual_conn=use_residual_conn

        self.instantiate_weights()
        self.logits = self.inference() #logits shape:[batch_size,self.num_classes]

        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")
        correct_prediction = tf.equal(tf.cast(self.predictions, tf.int32),self.input_y_label)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy")  # shape=()
        if self.is_training is False:# if it is not training, then no need to calculate loss and back-propagation.
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
Project: text_classification    Author: brightmart    | project source | file source
def __init__(self, filter_sizes,num_filters,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,
                 embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.filter_sizes=filter_sizes # it is a list of int. e.g. [3,4,5]
        self.num_filters=num_filters
        self.initializer=initializer
        self.num_filters_total=self.num_filters * len(filter_sizes) #how many filters totally.

        # add placeholder (X,label)
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X: first sentence
        self.input_x2 = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x2")  # X: second sentence
        self.input_y = tf.placeholder(tf.int32, [None,],name="input_y")  # y: 0 or 1. 1 means two sentences related to each other;0 means no relation.
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate
        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, 1, name="predictions")  # shape:[None,]

        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
Project: text_classification    Author: brightmart    | project source | file source
def __init__(self, label_size, learning_rate, batch_size, decay_steps, decay_rate,num_sampled,sentence_len,vocab_size,embed_size,is_training):
        """init all hyperparameter here"""
        # set hyperparamter
        self.label_size = label_size
        self.batch_size = batch_size
        self.num_sampled = num_sampled
        self.sentence_len=sentence_len
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate

        # add placeholder (X,label)
        self.sentence = tf.placeholder(tf.int32, [None, self.sentence_len], name="sentence")  # X
        self.labels = tf.placeholder(tf.int32, [None], name="Labels")  # y

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]
        if not is_training:
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.labels) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
Project: text_classification    Author: brightmart    | project source | file source
def __init__(self,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,
                 vocab_size,embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.hidden_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.initializer=initializer
        self.num_sampled=20

        # add placeholder (X,label)
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X
        self.input_y = tf.placeholder(tf.int32,[None], name="input_y")  # y [None,num_classes]
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss() #-->self.loss_nce()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
Project: tfplus    Author: renmengye    | project source | file source
def build_loss(self, inp, output):
        y_gt = inp['y_gt']
        y_out = output['y_out']
        ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
        num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
        ce = tf.reduce_sum(ce) / num_ex_f
        self.add_loss(ce)
        total_loss = self.get_loss()
        self.register_var('loss', total_loss)

        correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
        acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
        self.register_var('acc', acc)
        return total_loss
Project: tfplus    Author: renmengye    | project source | file source
def build_loss(self, inp, output):
        y_gt = inp['y_gt']
        y_out = output['y_out']
        ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
        num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
        ce = tf.reduce_sum(ce) / num_ex_f
        self.add_loss(ce)
        total_loss = self.get_loss()
        self.register_var('loss', total_loss)
        correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
        acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
        self.register_var('acc', acc)
        return total_loss
Project: C3D-tensorflow    Author: hx173149    | project source | file source
def tower_acc(logit, labels):
  correct_pred = tf.equal(tf.argmax(logit, 1), labels)
  accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
  return accuracy
Project: adversarial-deep-structural-networks    Author: wentaozhu    | project source | file source
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat(3, [1-yreshape, yreshape])
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, q_test
Project: adversarial-deep-structural-networks    Author: wentaozhu    | project source | file source
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat(3, [1-yreshape, yreshape])
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg
Project: adversarial-deep-structural-networks    Author: wentaozhu    | project source | file source
def model(X, Y, k1, k2, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat(3, [1-yreshape, yreshape])
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras, fusion=False)
    #h1, h2, h3, h4 = tf.split(3, 4, hconv4clip)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10, wunary=paras['wunary'])
  else: 
    hconv4clip = buildmodel(X, paras)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10)
  #hconv4log = -tf.log(hconv4clip)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  #q_train = -tf.log(hconv4clip)
  q_trainclip = tf.clip_by_value(q_train, 1e-6, 1.)
  trainenergy = tf.reduce_sum(-tf.log(q_trainclip)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])

  #q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.mul(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg