Python tensorflow module: bool() example source code

We extracted the following code examples from open-source Python projects to illustrate how to use tensorflow.bool().
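Before the per-project examples, here is a minimal self-contained sketch, not taken from any of the projects below and assuming TensorFlow 1.x graph/session execution, of the three tf.bool patterns that recur throughout this page: a scalar bool placeholder, casting to tf.bool, and boolean masking.

import tensorflow as tf  # assumes TensorFlow 1.x (graph/session API)

# A scalar bool placeholder, e.g. a training-mode flag.
is_training = tf.placeholder(dtype=tf.bool, shape=(), name="is_training")

# Casting a numeric tensor to tf.bool: non-zero values become True.
mask = tf.cast(tf.constant([1, 0, 1, 1]), tf.bool)
values = tf.constant([10.0, 20.0, 30.0, 40.0])
masked = tf.boolean_mask(values, mask)  # keeps elements where mask is True

# Branching on a scalar bool condition.
rate = tf.cond(is_training, lambda: tf.constant(0.5), lambda: tf.constant(0.0))

with tf.Session() as sess:
    print(sess.run([masked, rate], feed_dict={is_training: True}))
    # [array([10., 30., 40.], dtype=float32), 0.5]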

Project: onto-lstm    Author: pdasigi    | Project source | File source
def switch(condition, then_tensor, else_tensor):
    """
    Keras' implementation of switch for the tensorflow backend uses tf.switch,
    which accepts only scalar conditions; it should use tf.select instead.
    """
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        condition_shape = condition.get_shape()
        input_shape = then_tensor.get_shape()
        if condition_shape[-1] != input_shape[-1] and condition_shape[-1] == 1:
            # This means the last dim is an embedding dim. Keras does not mask this dimension. But tf wants
            # the condition and the then and else tensors to be the same shape.
            condition = K.dot(tf.cast(condition, tf.float32), tf.ones((1, input_shape[-1])))
        return tf.select(tf.cast(condition, dtype=tf.bool), then_tensor, else_tensor)
    else:
        import theano.tensor as T
        return T.switch(condition, then_tensor, else_tensor)
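A hypothetical call to the switch above, assuming the Keras tensorflow backend and the pre-1.0 TensorFlow API this snippet targets; names and shapes are illustrative, not from the project:

# Illustrative only: condition has a trailing dim of 1, so switch() above
# broadcasts it across the embedding dimension before selecting.
from keras import backend as K

condition = K.placeholder(shape=(None, 10, 1))     # e.g. a per-timestep mask
then_tensor = K.placeholder(shape=(None, 10, 50))  # e.g. embeddings
else_tensor = K.zeros_like(then_tensor)
out = switch(condition, then_tensor, else_tensor)  # same shape as then_tensor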
Project: seq2seq    Author: google    | Project source | File source
def create_initial_beam_state(config):
  """Creates an instance of `BeamState` that can be used on the first
  call to `beam_step`.

  Args:
    config: A BeamSearchConfig

  Returns:
    An instance of `BeamState`.
  """
  return BeamSearchState(
      log_probs=tf.zeros([config.beam_width]),
      finished=tf.zeros(
          [config.beam_width], dtype=tf.bool),
      lengths=tf.zeros(
          [config.beam_width], dtype=tf.int32))
Project: traffic_detection_yolo2    Author: wAuner    | Project source | File source
def build_model(inp_ph, train_flag_ph, lbl_ph, mask_ph):
    head = build_graph(inp_ph, train_flag_ph, 32, 5, 3)
    pred = yolo_prediction(head)
    loss = yolo_loss(lbl_ph, pred, mask_ph)

    return loss

#
# input_tensor = tf.placeholder(dtype=tf.float32, shape=(None, 416, 416, 3))
# train_flag = tf.placeholder(dtype=tf.bool)
# labels = tf.placeholder(tf.float32, shape=(None, 13, 13, 5, 7))
# mask = tf.placeholder(tf.bool, shape=(None, 13, 13, 5))
#
# head = build_graph(input_tensor, train_flag, 32, 5, 3)
# pred = yolo_prediction(head)
# loss = yolo_loss(labels, pred, mask)
Project: keras-image-captioning    Author: danieljl    | Project source | File source
def clip_norm(g, c, n):
    if c > 0:
        if K.backend() == 'tensorflow':
            import tensorflow as tf
            import copy
            condition = n >= c
            then_expression = tf.scalar_mul(c / n, g)
            else_expression = g

            if hasattr(then_expression, 'get_shape'):
                g_shape = copy.copy(then_expression.get_shape())
            elif hasattr(then_expression, 'dense_shape'):
                g_shape = copy.copy(then_expression.dense_shape)
            if condition.dtype != tf.bool:
                condition = tf.cast(condition, 'bool')
            g = K.tensorflow_backend.control_flow_ops.cond(
                condition, lambda: then_expression, lambda: else_expression)
            if hasattr(then_expression, 'get_shape'):
                g.set_shape(g_shape)
            elif hasattr(then_expression, 'dense_shape'):
                g._dense_shape = g_shape
        else:
            g = K.switch(n >= c, g * c / n, g)
    return g
Project: IntelAct-Vizdoom    Author: chendagui16    | Project source | File source
def __make_net(self, input_images, input_measure, input_actions, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        fc_val_params = copy.deepcopy(self.__fc_joint_params)
        fc_val_params[-1]['out_dims'] = self.__target_dim

        fc_adv_params = copy.deepcopy(self.__fc_joint_params)
        fc_adv_params[-1]['out_dims'] = len(self.__net_discrete_actions) * self.__target_dim

        if self.verbose:
            print('fc_val_params:', fc_val_params)
            print('fc_adv_params:', fc_adv_params)

        p_img_conv = ly.conv_encoder(input_images, self.__conv_params, 'p_img_conv', msra_coeff=0.9)
        p_img_fc = ly.fc_net(ly.flatten(p_img_conv), self.__fc_img_params, 'p_img_fc', msra_coeff=0.9)
        p_meas_fc = ly.fc_net(input_measure, self.__fc_measure_params, 'p_meas_fc', msra_coeff=0.9)
        p_val_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                             fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
        p_adv_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                             fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
        p_adv_fc_nomean = p_adv_fc - tf.reduce_mean(p_adv_fc, reduction_indices=1, keep_dims=True)

        self.__pred_all_nomean = tf.reshape(p_adv_fc_nomean, [-1, len(self.__net_discrete_actions), self.__target_dim])
        self.__pred_all = self.__pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.__target_dim])
        self.__pred_relevant = tf.boolean_mask(self.__pred_all, tf.cast(input_actions, tf.bool))
Project: keras    Author: GeekLiB    | Project source | File source
def switch(condition, then_expression, else_expression):
    '''Switches between two operations
    depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    '''
    x_shape = copy.copy(then_expression.get_shape())
    if condition.dtype != tf.bool:
        condition = tf.cast(condition, 'bool')
    x = _cond(condition,
              lambda: then_expression,
              lambda: else_expression)
    x.set_shape(x_shape)
    return x
Project: keras    Author: GeekLiB    | Project source | File source
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type bool. output_i is True if
        targets_i is within top-k values of predictions_i
    '''
    return tf.nn.in_top_k(predictions, targets, k)


# CONVOLUTIONS
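A minimal sketch of calling tf.nn.in_top_k directly, assuming TensorFlow 1.x; the values are chosen purely for illustration:

import tensorflow as tf

predictions = tf.constant([[0.1, 0.8, 0.1],
                           [0.6, 0.3, 0.1]])     # batch_size x classes, float32
targets = tf.constant([1, 2])                    # int32 class ids
hits = tf.nn.in_top_k(predictions, targets, k=2)

with tf.Session() as sess:
    print(sess.run(hits))  # [ True False]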
Project: zhusuan    Author: thu-ml    | Project source | File source
def __init__(self, initial_stepsize, adapt_step_size, gamma, t0, kappa,
                 delta):
        with tf.name_scope("StepsizeTuner"):
            self.adapt_step_size = tf.convert_to_tensor(
                adapt_step_size, dtype=tf.bool, name="adapt_step_size")
            self.initial_stepsize = initial_stepsize

            self.gamma = tf.convert_to_tensor(gamma, dtype=tf.float32,
                                              name="gamma")
            self.t0 = tf.convert_to_tensor(t0, dtype=tf.float32, name="t0")
            self.kappa = tf.convert_to_tensor(kappa, dtype=tf.float32,
                                              name="kappa")
            self.delta = tf.convert_to_tensor(delta, dtype=tf.float32,
                                              name="delta")
            self.mu = tf.constant(10 * initial_stepsize, dtype=tf.float32,
                                  name="mu")

            self.step = tf.Variable(0.0, dtype=tf.float32,
                                    name="step", trainable=False)
            self.log_epsilon_bar = tf.Variable(
                0.0, dtype=tf.float32, name="log_epsilon_bar", trainable=False)
            self.h_bar = tf.Variable(0.0, dtype=tf.float32,
                                     name="h_bar", trainable=False)
Project: zhusuan    Author: thu-ml    | Project source | File source
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` have the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # There is a bug in TensorFlow where static shape inference is not done
    # correctly in nested tf.cond()'s, so instead of comparing the shapes of
    # x and y directly we compare their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
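A hypothetical session check using the helper above (TensorFlow 1.x assumed):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3])
y = tf.placeholder(tf.float32, shape=[None, 3])
same = is_same_dynamic_shape(x, y)

with tf.Session() as sess:
    # Both fed as shape (1, 3) -> True; mismatched ranks or shapes -> False.
    print(sess.run(same, feed_dict={x: [[1., 2., 3.]], y: [[4., 5., 6.]]}))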
Project: dynamic-training-bench    Author: galeone    | Project source | File source
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model during training
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
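For context, a minimal sketch of how such a tf.placeholder_with_default flag behaves at run time (TensorFlow 1.x assumed; rate is an illustrative stand-in for a training-only op such as dropout):

import tensorflow as tf

is_training_ = tf.placeholder_with_default(False, shape=(), name="is_training_")
rate = tf.cond(is_training_, lambda: tf.constant(0.5), lambda: tf.constant(0.0))

with tf.Session() as sess:
    print(sess.run(rate))                                  # 0.0 (the default)
    print(sess.run(rate, feed_dict={is_training_: True}))  # 0.5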
Project: tflearn    Author: tflearn    | Project source | File source
def init_training_mode():
    """  init_training_mode.

    Creates the `is_training` variable and its ops if they haven't been created
    yet. This op is required if you are using layers such as dropout or
    batch normalization independently of TFLearn models (DNN or Trainer class).

    """
    # 'is_training' collection stores the training mode variable
    coll = tf.get_collection('is_training')
    if len(coll) == 0:
        tr_var = variable(
            "is_training", dtype=tf.bool, shape=[],
            initializer=tf.constant_initializer(False),
            trainable=False)
        tf.add_to_collection('is_training', tr_var)
        # 'is_training_ops' stores the ops to update training mode variable
        a = tf.assign(tr_var, True)
        b = tf.assign(tr_var, False)
        tf.add_to_collection('is_training_ops', a)
        tf.add_to_collection('is_training_ops', b)
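A hedged usage sketch, assuming TFLearn's variable helper is importable: after init_training_mode() has run, the two assign ops stored in 'is_training_ops' toggle the mode (set-True first, set-False second, following the collection order above):

import tensorflow as tf

init_training_mode()
set_training, set_inference = tf.get_collection('is_training_ops')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(set_training)   # is_training <- True (e.g. before a training step)
    sess.run(set_inference)  # is_training <- False (e.g. before evaluation)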
Project: tflearn    Author: tflearn    | Project source | File source
def __init__(self, model_type="wide+deep", verbose=None, name=None, tensorboard_verbose=3, 
                 wide_learning_rate=0.001, deep_learning_rate=0.001, checkpoints_dir=None):
        '''
        model_type = `str`: wide or deep or wide+deep
        verbose = `bool`
        name = `str` used for run_id (defaults to model_type)
        tensorboard_verbose = `int`: logging level for tensorboard (0, 1, 2, or 3)
        wide_learning_rate = `float`: defaults to 0.001
        deep_learning_rate = `float`: defaults to 0.001
        checkpoints_dir = `str`: where checkpoint files will be stored (defaults to "CHECKPOINTS")
        '''
        self.model_type = model_type or "wide+deep"
        assert self.model_type in self.AVAILABLE_MODELS
        self.verbose = verbose or 0
        self.tensorboard_verbose = tensorboard_verbose
        self.name = name or self.model_type # name is used for the run_id
        self.data_columns = COLUMNS
        self.continuous_columns = CONTINUOUS_COLUMNS
        self.categorical_columns = CATEGORICAL_COLUMNS  # dict with category_name: category_size
        self.label_column = LABEL_COLUMN
        self.checkpoints_dir = checkpoints_dir or "CHECKPOINTS"
        if not os.path.exists(self.checkpoints_dir):
            os.mkdir(self.checkpoints_dir)
            print("Created checkpoints directory %s" % self.checkpoints_dir)
        self.build_model([wide_learning_rate, deep_learning_rate])
Project: sonnet    Author: deepmind    | Project source | File source
def setUp(self):
    super(AttentiveReadTest, self).setUp()

    self._batch_size = 3
    self._memory_size = 4
    self._memory_word_size = 1
    self._query_word_size = 2
    self._memory = tf.reshape(
        tf.cast(tf.range(0, 3 * 4 * 1), dtype=tf.float32), shape=[3, 4, 1])
    self._query = tf.reshape(
        tf.cast(tf.range(0, 3 * 2), dtype=tf.float32), shape=[3, 2])
    self._memory_mask = tf.convert_to_tensor(
        [
            [True, True, True, True],
            [True, True, True, False],
            [True, True, False, False],
        ],
        dtype=tf.bool)
    self._attention_logit_mod = ConstantZero()
    self._attention_mod = snt.AttentiveRead(self._attention_logit_mod)
Project: sonnet    Author: deepmind    | Project source | File source
def testNoMemorySlotsLeft(self):
    # Every example must have at least one unmasked memory slot for attention
    # to work.
    memory_mask = tf.convert_to_tensor(
        [
            [True, True, True, True],
            [True, True, True, False],
            [False, False, False, False],
        ],
        dtype=tf.bool)
    attention_output = self._attention_mod(
        self._memory, self._query, memory_mask=memory_mask)
    x = attention_output.read
    with self.test_session() as sess:
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(x)
Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def create_initial_beam_state(config):
  """Creates an instance of `BeamState` that can be used on the first
  call to `beam_step`.

  Args:
    config: A BeamSearchConfig

  Returns:
    An instance of `BeamState`.
  """
  return BeamSearchState(
      log_probs=tf.zeros([config.beam_width]),
      finished=tf.zeros(
          [config.beam_width], dtype=tf.bool),
      lengths=tf.zeros(
          [config.beam_width], dtype=tf.int32))
Project: transform    Author: tensorflow    | Project source | File source
def _from_domain_dict(domain):
  """Translate a JSON domain dict into a Domain."""
  if domain.get('ints') is not None:
    def maybe_to_int(s):
      return int(s) if s is not None else None
    return sch.IntDomain(
        tf.int64,
        maybe_to_int(domain['ints'].get('min')),
        maybe_to_int(domain['ints'].get('max')),
        domain['ints'].get('isCategorical'),
        domain['ints'].get('vocabularyFile', ''))
  if domain.get('floats') is not None:
    return sch.FloatDomain(tf.float32)
  if domain.get('strings') is not None:
    return sch.StringDomain(tf.string)
  if domain.get('bools') is not None:
    return sch.BoolDomain(tf.bool)
  raise ValueError('Unknown domain: {}'.format(domain))
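Hypothetical inputs, assuming the sch schema module imported by this file:

# A JSON domain dict with a 'bools' key maps to a tf.bool domain.
bool_domain = _from_domain_dict({'bools': {}})    # -> sch.BoolDomain(tf.bool)
int_domain = _from_domain_dict({'ints': {'min': '0', 'max': '9'}})
# -> sch.IntDomain(tf.int64, 0, 9, None, '')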
Project: DirectFuturePrediction    Author: IntelVCL    | Project source | File source
def make_net(self, input_images, input_measurements, input_actions, input_objectives, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        self.fc_joint_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
        p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
        p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
        p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
        if isinstance(self.fc_obj_params, np.ndarray):
            p_obj_fc = my_ops.fc_net(input_objectives, self.fc_obj_params, 'p_obj_fc', msra_coeff=0.9)
            p_concat_fc = tf.concat([p_img_fc,p_meas_fc,p_obj_fc], 1)
        else:
            p_concat_fc = tf.concat([p_img_fc,p_meas_fc], 1)
            if self.random_objective_coeffs:
                raise Exception('Need fc_obj_params with randomized objectives')

        p_joint_fc = my_ops.fc_net(p_concat_fc, self.fc_joint_params, 'p_joint_fc', last_linear=True, msra_coeff=0.9)
        pred_all = tf.reshape(p_joint_fc, [-1, len(self.net_discrete_actions), self.target_dim])
        pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))

        return pred_all, pred_relevant
Project: LiTeFlow    Author: petrux    | Project source | File source
def test_output(self):
        """Test the DynamicDecoder.output() method."""

        helper = mock.Mock()
        decoder = mock.Mock()
        zero_output = tf.constant([[0, 0, 0], [0, 0, 0]], dtype=tf.float32)
        decoder.zero_output.side_effect = [zero_output]

        output = tf.constant([[23, 23, 23], [23, 23, 23]], dtype=tf.float32)
        finished = tf.constant([True, False], dtype=tf.bool)

        dyndec = layers.DynamicDecoder(decoder, helper)
        act_output_t = dyndec.output(output, finished)
        exp_output = np.asarray([[0, 0, 0], [23, 23, 23]], dtype=np.float32) # pylint: disable=I0011,E1101

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            act_output = sess.run(act_output_t)

        helper.finished.assert_not_called()
        decoder.zero_output.assert_called_once()
        self.assertAllEqual(exp_output, act_output)
Project: LiTeFlow    Author: petrux    | Project source | File source
def test_condition(self):
        """Test the DynamicDecoder.condition() method."""

        helper = mock.Mock()
        decoder = mock.Mock()

        dyndec = layers.DynamicDecoder(decoder, helper)
        finished = [(tf.constant([True], dtype=tf.bool), False),
                    (tf.constant([False], dtype=tf.bool), True),
                    (tf.constant([True, False], dtype=tf.bool), True),
                    (tf.constant([True, True], dtype=tf.bool), False)]

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for tensor, expected in finished:
                actual = sess.run(dyndec.cond(None, None, None, tensor, None))
                self.assertEqual(expected, actual)

        helper.assert_not_called()
        decoder.assert_not_called()
Project: keraflow    Author: ipod825    | Project source | File source
def switch(self, condition, then_expression, else_expression):
        '''Switches between two operations depending on a scalar value (int or bool).
        Note that both `then_expression` and `else_expression`
        should be symbolic tensors of the *same shape*.

        # Arguments
            condition: scalar tensor.
            then_expression: TensorFlow operation.
            else_expression: TensorFlow operation.
        '''
        x_shape = copy.copy(then_expression.get_shape())
        x = tf.python.control_flow_ops.cond(self.cast(condition, 'bool'),
                                            lambda: then_expression,
                                            lambda: else_expression)
        x.set_shape(x_shape)
        return x
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def var(x, axis=None, keepdims=False):
    """Variance of a tensor, alongside the specified axis.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to compute the variance.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    # Returns
        A tensor with the variance of elements of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    if x.dtype.base_dtype == tf.bool:
        x = tf.cast(x, floatx())
    m = tf.reduce_mean(x, reduction_indices=axis, keep_dims=True)
    devs_squared = tf.square(x - m)
    return tf.reduce_mean(devs_squared,
                          reduction_indices=axis,
                          keep_dims=keepdims)
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def mean(x, axis=None, keepdims=False):
    """Mean of a tensor, alongside the specified axis.

    # Arguments
        x: A tensor or variable.
        axis: A list of integers. Axes along which to compute the mean.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1 for each entry in `axis`. If `keepdims` is `True`,
            the reduced dimensions are retained with length 1.

    # Returns
        A tensor with the mean of elements of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    if x.dtype.base_dtype == tf.bool:
        x = tf.cast(x, floatx())
    return tf.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape `batch_size` x classes and type `float32`.
        targets: A tensor of shape batch_size and type `int32` or `int64`.
        k: An `int`, number of top elements to consider.

    # Returns
        A tensor of shape `batch_size` and type `bool`. `output_i` is `True` if
        `targets_i` is within top-k values of `predictions_i`
    """
    return tf.nn.in_top_k(predictions, targets, k)


# CONVOLUTIONS
Project: tefla    Author: openAGI    | Project source | File source
def create_initial_beam_state(config):
    """Creates an instance of `BeamState` that can be used on the first
    call to `beam_step`.

    Args:
      config: A BeamSearchConfig

    Returns:
      An instance of `BeamState`.
    """
    return BeamSearchState(
        log_probs=tf.zeros([config.beam_width]),
        finished=tf.zeros(
            [config.beam_width], dtype=tf.bool),
        lengths=tf.zeros(
            [config.beam_width], dtype=tf.int32))
Project: mgail    Author: itaicaspi    | Project source | File source
def step(self, action, mode):
        qvel, qpos = [], []
        if mode == 'tensorflow':
            if self.random_initialization:
                state, reward, done, qvel, qpos = tf.py_func(self._step, inp=[action], Tout=[tf.float32, tf.float32, tf.bool, tf.float32, tf.float32], name='env_step_func')
            else:
                state, reward, done = tf.py_func(self._step, inp=[action],
                                                 Tout=[tf.float32, tf.float32, tf.bool],
                                                 name='env_step_func')

            state = tf.reshape(state, shape=(self.state_size,))
            done.set_shape(())
        else:
            if self.random_initialization:
                state, reward, done, qvel, qpos = self._step(action)
            else:
                state, reward, done = self._step(action)

        return state, reward, done, 0., qvel, qpos
Project: lsdc    Author: febert    | Project source | File source
def testSingleUpdateSomeMissingKIs2(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(self._np_labels, shape=(self._batch_size,))
    weights = tf.constant([0, 1, 1, 1], shape=(self._batch_size,),
                          dtype=tf.float32)
    mask = tf.constant([False, False, True, False], shape=(self._batch_size,),
                       dtype=tf.bool)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2, ignore_mask=mask, weights=weights)

    with self.test_session() as sess:
      sess.run(tf.initialize_local_variables())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
Project: lsdc    Author: febert    | Project source | File source
def testSomePresentOneUpdate(self):
    with self.test_session() as sess:
      values = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
      mask = tf.constant([False, True, False, False], shape=(1, 4),
                         dtype=tf.bool)
      weights = tf.constant([1, 1, 0, 1], shape=(1, 4), dtype=tf.float32)

      pcnt0, update_op0 = metrics.streaming_percentage_less(
          values, 100, ignore_mask=mask, weights=weights, name='high')
      pcnt1, update_op1 = metrics.streaming_percentage_less(
          values, 7, ignore_mask=mask, weights=weights, name='medium')
      pcnt2, update_op2 = metrics.streaming_percentage_less(
          values, 1, ignore_mask=mask, weights=weights, name='low')

      sess.run(tf.initialize_local_variables())
      self.assertListEqual([1.0, 0.5, 0.0],
                           sess.run([update_op0, update_op1, update_op2]))

      pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
      self.assertAlmostEqual(1.0, pcnt0, 5)
      self.assertAlmostEqual(0.5, pcnt1, 5)
      self.assertAlmostEqual(0.0, pcnt2, 5)
Project: keras-customized    Author: ambrite    | Project source | File source
def switch(condition, then_expression, else_expression):
    '''Switches between two operations
    depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    '''
    x_shape = copy.copy(then_expression.get_shape())
    if condition.dtype != tf.bool:
        condition = tf.cast(condition, 'bool')
    x = _cond(condition,
              lambda: then_expression,
              lambda: else_expression)
    x.set_shape(x_shape)
    return x
Project: keras-customized    Author: ambrite    | Project source | File source
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type bool. output_i is True if
        targets_i is within top-k values of predictions_i
    '''
    return tf.nn.in_top_k(predictions, targets, k)


# CONVOLUTIONS
Project: fathom    Author: rdadolf    | Project source | File source
def ctc_label_dense_to_sparse( self, labels, label_lengths ):
    """Mike Henry's implementation, with some minor modifications."""
    with self.G.as_default():
      label_shape = tf.shape( labels )
      num_batches_tns = tf.stack( [label_shape[0]] )
      max_num_labels_tns = tf.stack( [label_shape[1]] )

      def range_less_than(previous_state, current_input):
        return tf.expand_dims( tf.range( label_shape[1] ), 0 ) < current_input

      init = tf.cast( tf.fill( max_num_labels_tns, 0 ), tf.bool )
      init = tf.expand_dims( init, 0 )
      dense_mask = functional_ops.scan(range_less_than, label_lengths , initializer=init, parallel_iterations=1)
      dense_mask = dense_mask[ :, 0, : ]

      label_array = tf.reshape( tf.tile( tf.range( 0, label_shape[1] ), num_batches_tns ), label_shape )
      label_ind = tf.boolean_mask( label_array, dense_mask )

      batch_array = tf.transpose( tf.reshape( tf.tile( tf.range( 0,  label_shape[0] ), max_num_labels_tns ), tf.reverse( label_shape,[0]) ) )
      batch_ind = tf.boolean_mask( batch_array, dense_mask )

      indices = tf.transpose( tf.reshape( tf.concat( axis=0, values=[batch_ind, label_ind] ), [2,-1] ) )
      vals_sparse = tf.gather_nd( labels, indices )
      return tf.SparseTensor( tf.to_int64(indices), vals_sparse, tf.to_int64( label_shape ) )
Project: lang2program    Author: kelvinguu    | Project source | File source
def change_pad_value(values, mask, pad_val):
    """Given a set of values and a pad mask, change the value of all pad entries.

    Args:
        values (Tensor): of shape [batch_size, seq_length, :, ..., :].
        mask (Tensor): binary float tensor of shape [batch_size, seq_length]
        pad_val (float): value to set all pad entries to

    Returns:
        Tensor: a new Tensor of same shape as values
    """
    # broadcast the mask to match shape of values
    mask = expand_dims_for_broadcast(mask, values)  # (batch_size, seq_length, 1, ..., 1)
    mask = broadcast(mask, values)
    mask = tf.cast(mask, tf.bool)  # cast to bool

    # broadcast val
    broadcast_val = pad_val * tf.ones(tf.shape(values))

    new_values = tf.select(mask, values, broadcast_val)
    return new_values
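The tf.select call above predates TensorFlow 1.0; in 1.x the same elementwise selection is tf.where. A minimal stand-alone sketch of the pad-masking idea with tf.where (illustrative values, without the project's broadcast helpers):

import tensorflow as tf

values = tf.constant([[1.0, 2.0], [3.0, 4.0]])
keep = tf.cast(tf.constant([[1, 0], [0, 1]]), tf.bool)  # True = keep value
padded = tf.where(keep, values, -1.0 * tf.ones_like(values))

with tf.Session() as sess:
    print(sess.run(padded))  # [[ 1. -1.] [-1.  4.]]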
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | Project source | File source
def initialize(self):
        """Initialize the decoder.
        Returns:
          `(finished, start_inputs, initial_state)`.
        """
        start_inputs = self._embedding_fn(self._tiled_start_tokens)
        print('start_inputs', start_inputs)
        finished = tf.zeros((self.batch_size, self._beam_width), dtype=tf.bool)

        self._initial_num_available_beams = tf.ones((self._batch_size,), dtype=tf.int32)
        self._full_num_available_beams = tf.fill((self._batch_size,), self._beam_width)

        with tf.name_scope('first_beam_mask'):
            self._first_beam_mask = self._make_beam_mask(self._initial_num_available_beams)
        with tf.name_scope('full_beam_mask'):
            self._full_beam_mask = self._make_beam_mask(self._full_num_available_beams)
        with tf.name_scope('minus_inifinity_scores'):
            self._minus_inifinity_scores = tf.fill((self.batch_size, self._beam_width, self._output_size), -1e+8)

        self._batch_size_range = tf.range(self.batch_size)
        initial_state = BeamSearchOptimizationDecoderState(
            cell_state=self._tiled_initial_cell_state,
            previous_logits=tf.zeros([self.batch_size, self._beam_width, self._output_size], dtype=tf.float32),
            previous_score=tf.zeros([self.batch_size, self._beam_width], dtype=tf.float32),
            # During the first time step we only consider the initial beam
            num_available_beams=self._initial_num_available_beams,
            gold_beam_id=tf.zeros([self.batch_size], dtype=tf.int32),
            finished=finished)

        return (finished, start_inputs, initial_state)
Project: AVSR-Deep-Speech    Author: pandeydivesh15    | Project source | File source
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
Project: youtube-8m    Author: wangheda    | Project source | File source
def prepare_reader(self, filename_queue):

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            "video_id": tf.FixedLenFeature([], tf.string),
            "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            "rgb": tf.FixedLenSequenceFeature([], dtype=tf.string),
            "audio": tf.FixedLenSequenceFeature([], dtype=tf.string),
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
            validate_indices=False),
        tf.bool))

    rgbs, num_frames = self.get_video_matrix(features["rgb"], 1024, self.max_frames)
    audios, num_frames = self.get_video_matrix(features["audio"], 1024, self.max_frames)

    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_rgbs = tf.expand_dims(rgbs, 0)
    batch_audios = tf.expand_dims(audios, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_rgbs, batch_audios, batch_labels, batch_frames
Project: distributional_perspective_on_RL    Author: Kiwoo    | Project source | File source
def _init(self, ob_space, ac_space, hid_size, num_hid_layers, gaussian_fixed_var=True):
        assert isinstance(ob_space, gym.spaces.Box)

        self.pdtype = pdtype = make_pdtype(ac_space)
        sequence_length = None

        ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))

        # with tf.variable_scope("obfilter"):
        #     self.ob_rms = RunningMeanStd(shape=ob_space.shape)
        # Kiwoo: MPI is currently blocked here; the surrounding code needs to be revisited.

        obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
        last_out = obz
        for i in range(num_hid_layers):
            last_out = tf.nn.tanh(U.dense(last_out, hid_size, "vffc%i"%(i+1), weight_init=U.normc_initializer(1.0)))
        self.vpred = U.dense(last_out, 1, "vffinal", weight_init=U.normc_initializer(1.0))[:,0]

        last_out = obz
        for i in range(num_hid_layers):
            last_out = tf.nn.tanh(U.dense(last_out, hid_size, "polfc%i"%(i+1), weight_init=U.normc_initializer(1.0)))
        if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
            mean = U.dense(last_out, pdtype.param_shape()[0]//2, "polfinal", U.normc_initializer(0.01))            
            logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer())
            pdparam = U.concatenate([mean, mean * 0.0 + logstd], axis=1)
        else:
            pdparam = U.dense(last_out, pdtype.param_shape()[0], "polfinal", U.normc_initializer(0.01))

        self.pd = pdtype.pdfromflat(pdparam)

        self.state_in = []
        self.state_out = []

        stochastic = tf.placeholder(dtype=tf.bool, shape=())
        ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
        self._act = U.function([stochastic, ob], [ac, self.vpred])
Project: seq2seq    Author: google    | Project source | File source
def test_step(self):
    beam_state = beam_search.BeamSearchState(
        log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
        lengths=tf.constant(
            2, shape=[self.config.beam_width], dtype=tf.int32),
        finished=tf.zeros(
            [self.config.beam_width], dtype=tf.bool))

    logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
    logits_[0, 2] = 1.9
    logits_[0, 3] = 2.1
    logits_[1, 3] = 3.1
    logits_[1, 4] = 0.9
    logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
    log_probs = tf.nn.log_softmax(logits)

    outputs, next_beam_state = beam_search.beam_search_step(
        time_=2, logits=logits, beam_state=beam_state, config=self.config)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 2])
    np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
    np.testing.assert_array_equal(next_state_.lengths, [3, 3, 3])
    np.testing.assert_array_equal(next_state_.finished, [False, False, False])

    expected_log_probs = state_.log_probs[[1, 0, 0]]
    expected_log_probs[0] += log_probs_[1, 3]
    expected_log_probs[1] += log_probs_[0, 3]
    expected_log_probs[2] += log_probs_[0, 2]
    np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
Project: seq2seq    Author: google    | Project source | File source
def test_step_with_eos(self):
    beam_state = beam_search.BeamSearchState(
        log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
        lengths=tf.convert_to_tensor(
            [2, 1, 2], dtype=tf.int32),
        finished=tf.constant(
            [False, True, False], dtype=tf.bool))

    logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
    logits_[0, 2] = 1.1
    logits_[1, 2] = 1.0
    logits_[2, 2] = 1.0
    logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
    log_probs = tf.nn.log_softmax(logits)

    outputs, next_beam_state = beam_search.beam_search_step(
        time_=2, logits=logits, beam_state=beam_state, config=self.config)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    np.testing.assert_array_equal(outputs_.predicted_ids, [0, 2, 2])
    np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 2])
    np.testing.assert_array_equal(next_state_.lengths, [1, 3, 3])
    np.testing.assert_array_equal(next_state_.finished, [True, False, False])

    expected_log_probs = state_.log_probs[outputs_.beam_parent_ids]
    expected_log_probs[1] += log_probs_[0, 2]
    expected_log_probs[2] += log_probs_[2, 2]
    np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
Project: seq2seq    Author: google    | Project source | File source
def test_step_with_new_eos(self):
    beam_state = beam_search.BeamSearchState(
        log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
        lengths=tf.constant(
            2, shape=[self.config.beam_width], dtype=tf.int32),
        finished=tf.zeros(
            [self.config.beam_width], dtype=tf.bool))

    logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
    logits_[0, 0] = 1.9
    logits_[0, 3] = 2.1
    logits_[1, 3] = 3.1
    logits_[1, 4] = 0.9
    logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
    log_probs = tf.nn.log_softmax(logits)

    outputs, next_beam_state = beam_search.beam_search_step(
        time_=2, logits=logits, beam_state=beam_state, config=self.config)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 0])
    np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
    np.testing.assert_array_equal(next_state_.lengths, [3, 3, 2])
    np.testing.assert_array_equal(next_state_.finished, [False, False, True])

    expected_log_probs = state_.log_probs[[1, 0, 0]]
    expected_log_probs[0] += log_probs_[1, 3]
    expected_log_probs[1] += log_probs_[0, 3]
    expected_log_probs[2] += log_probs_[0, 0]
    np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
Project: benchmarks    Author: tensorflow    | Project source | File source
def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list):
    """Adds ops to enqueue on all worker queues.

    Args:
      name_prefix: prefix for the shared_name of ops.
      enqueue_after_list: ops to add as control dependencies before the enqueues.

    Returns:
      an op that should be used as control dependency before starting next step.
    """
    self.sync_queue_counter += 1
    with tf.device(self.sync_queue_devices[(
        self.sync_queue_counter % len(self.sync_queue_devices))]):
      sync_queues = [
          tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]],
                       shared_name='%s%s' % (name_prefix, i))
          for i in range(self.num_workers)]
      queue_ops = []
      # For each other worker, add an entry in a queue, signaling that it can
      # finish this step.
      token = tf.constant(False)
      with tf.control_dependencies(enqueue_after_list):
        for i, q in enumerate(sync_queues):
          if i == self.task_index:
            queue_ops.append(tf.no_op())
          else:
            queue_ops.append(q.enqueue(token))

      # Drain tokens off queue for this worker, one for each other worker.
      queue_ops.append(
          sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))

      return tf.group(*queue_ops)
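The synchronization barrier rests on plain bool-typed FIFO queues; a stripped-down sketch of that primitive (TensorFlow 1.x assumed, sizes and names illustrative):

import tensorflow as tf

q = tf.FIFOQueue(2, [tf.bool], shapes=[[]], shared_name='sync_queue_demo')
enqueue = q.enqueue(tf.constant(False))
drain = q.dequeue_many(2)  # blocks until two tokens have been enqueued

with tf.Session() as sess:
    sess.run(enqueue)
    sess.run(enqueue)
    print(sess.run(drain))  # [False False]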
Project: lung-cancer-detector    Author: YichenGong    | Project source | File source
def create_inputs(self, image_size):
        print("Creating input Placeholders...")
        #input image
        self.X = tf.placeholder(dtype=tf.float32, 
            shape=[None, image_size[0], image_size[1], 1],
            name="in")

        #Outputs of the different heads

        #Nodule head
        self.Y_nodule = tf.placeholder(dtype=tf.float32,
            shape=[None, image_size[0], image_size[1], 1],
            name="out_nodule")
        self.Y_nodule_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="nodule_weight")

        #Cancer head
        self.Y_cancer = tf.placeholder(dtype=tf.float32,
            shape=[None, 1],
            name="out_cancer")
        self.Y_cancer_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="cancer_weight")

        #Boolean variables to check head and mode
        self.is_training = tf.placeholder(dtype=tf.bool,
            name="is_training")
        self.is_nodule = tf.placeholder(dtype=tf.bool,
            name="is_nodule")
        self.is_cancer = tf.placeholder(dtype=tf.bool,
            name="is_cancer")

        #Probability for dropout
        self.drop_prob = tf.placeholder_with_default(input=0.0,
            shape=None,
            name="dropout_probability")

        print("Created input placeholders!")
Project: kaggle-review    Author: daxiongshu    | Project source | File source
def _build(self):
        V = self.V # vocabulary size
        M = self.flags.embedding_size # 64
        C = self.flags.classes
        W = self.flags.window_size
        S = self.flags.seq_len*2+1
        B = self.flags.batch_size
        H = 32
        is_training = tf.placeholder(dtype=tf.bool)
        netname = "CBOW"
        with tf.variable_scope(netname):
            self.inputs = tf.placeholder(dtype=tf.int32,shape=[None, S]) #[B,S]
            # each element is a word id.

            layer_name = "{}/embedding".format(netname)
            x = self._get_embedding(layer_name, self.inputs, V, M, reuse=False) # [B, S, M]       
        netname = "BaoBaoMiaoCnn"
        with tf.variable_scope(netname):
            x = tf.expand_dims(x, axis=3) # [B,S,M,1]

            net1 = self.conv_maxpool(x,W,M,S,H,"%s/conv1"%netname,1)     # [B,1,1,16]
            net2 = self.conv_maxpool(x,W*2,M,S,H,"%s/conv2"%netname,1)
            net3 = self.conv_maxpool(x,W//2,M,S,H,"%s/conv3"%netname,1)
            net = tf.concat([net1,net2,net3],axis=3) # [B,1,1,48]
            net = self._batch_normalization(net, layer_name='%s/batch_norm1'%(netname))
            net = tf.squeeze(net) # [B,48]
            #net = self._fc(net, fan_in=H*3, fan_out=H, layer_name="%s/fc0"%netname, activation='relu')
            net = self._fc(net, fan_in=H*3, fan_out=C, layer_name="%s/fc1"%netname, activation=None)
            self.logit = net
        self.is_training = is_training
Project: text_classification    Author: brightmart    | Project source | File source
def __init__(self, num_classes, learning_rate, batch_size, decay_steps, decay_rate, sequence_length,
                 vocab_size, embed_size,d_model,d_k,d_v,h,num_layer,is_training,decoder_sent_length=6,
                 initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,l2_lambda=0.0001):
        """init all hyperparameter here"""
        super(Transformer, self).__init__(d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=num_layer) #init some fields by using parent class.

        self.num_classes = num_classes
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_size = d_model
        self.learning_rate = tf.Variable(learning_rate, trainable=False, name="learning_rate")
        self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5)
        self.initializer = initializer
        self.decoder_sent_length=decoder_sent_length
        self.clip_gradients=clip_gradients
        self.l2_lambda=l2_lambda

        self.is_training = is_training  # alternatively a tf.bool placeholder: tf.placeholder(tf.bool, name="is_training")
        self.input_x = tf.placeholder(tf.int32, [self.batch_size, self.sequence_length], name="input_x")                 #x  batch_size
        self.decoder_input = tf.placeholder(tf.int32, [self.batch_size, self.decoder_sent_length],name="decoder_input")  #y, but shift None
        self.input_y_label = tf.placeholder(tf.int32, [self.batch_size, self.decoder_sent_length], name="input_y_label") #y, but shift None
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step = tf.Variable(0, trainable=False, name="Epoch_Step")
        self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #logits shape:[batch_size,decoder_sent_length,self.num_classes]

        self.predictions = tf.argmax(self.logits, axis=2, name="predictions")
        self.accuracy = tf.constant(0.5)  # fake accuracy (you can calculate accuracy outside of the graph using calculate_accuracy(...) in train.py)
        if self.is_training is False:  # when not training, there is no need to compute the loss or run back-propagation.
            return
        self.loss_val = self.loss_seq2seq()
        self.train_op = self.train()
Project: text_classification    Author: brightmart    | Project source | File source
def __init__(self, num_classes, learning_rate, batch_size, decay_steps, decay_rate, sequence_length,
                 vocab_size, embed_size,d_model,d_k,d_v,h,num_layer,is_training,
                 initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,l2_lambda=0.0001,use_residual_conn=False):
        """init all hyperparameter here"""
        super(Transformer, self).__init__(d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=num_layer) #init some fields by using parent class.

        self.num_classes = num_classes
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_size = d_model
        self.learning_rate = tf.Variable(learning_rate, trainable=False, name="learning_rate")
        self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5)
        self.initializer = initializer
        self.clip_gradients=clip_gradients
        self.l2_lambda=l2_lambda

        self.is_training = is_training  # alternatively a tf.bool placeholder: tf.placeholder(tf.bool, name="is_training")
        self.input_x = tf.placeholder(tf.int32, [self.batch_size, self.sequence_length], name="input_x")                 #x  batch_size
        self.input_y_label = tf.placeholder(tf.int32, [self.batch_size], name="input_y_label")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step = tf.Variable(0, trainable=False, name="Epoch_Step")
        self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate
        self.use_residual_conn=use_residual_conn

        self.instantiate_weights()
        self.logits = self.inference() #logits shape:[batch_size,self.num_classes]

        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")
        correct_prediction = tf.equal(tf.cast(self.predictions, tf.int32),self.input_y_label)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy")  # shape=()
        if self.is_training is False:  # when not training, there is no need to compute the loss or run back-propagation.
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
Project: DeepPath    Author: xwhan    | Project source | File source
def __init__(self, scope = 'policy_network', learning_rate = 0.001):
        self.initializer = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope(scope):
            self.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')
            self.action = tf.placeholder(tf.int32, [None], name = 'action')
            self.target = tf.placeholder(tf.float32, name = 'target')
            self.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)

            action_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)
            self.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)

            self.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)*self.target) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=scope))
            self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
            self.train_op = self.optimizer.minimize(self.loss)
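The one_hot -> tf.bool -> boolean_mask pattern above plucks each example's chosen-action probability out of the batch; a small stand-alone sketch (TensorFlow 1.x assumed, values illustrative):

import tensorflow as tf

action_space = 4
action = tf.constant([2, 0])                    # one action id per example
action_prob = tf.constant([[0.1, 0.2, 0.3, 0.4],
                           [0.7, 0.1, 0.1, 0.1]])
action_mask = tf.cast(tf.one_hot(action, depth=action_space), tf.bool)
picked = tf.boolean_mask(action_prob, action_mask)

with tf.Session() as sess:
    print(sess.run(picked))  # [0.3 0.7]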
Project: DeepPath    Author: xwhan    | Project source | File source
def __init__(self, learning_rate = 0.001):
        self.initializer = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope('supervised_policy'):
            self.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')
            self.action = tf.placeholder(tf.int32, [None], name = 'action')
            self.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)

            action_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)
            self.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)

            self.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope = 'supervised_policy'))
            self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
            self.train_op = self.optimizer.minimize(self.loss)