Python tensorflow module: convert_to_tensor() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.convert_to_tensor().
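The excerpts below are written against the graph-and-session API of TensorFlow 1.x (and, in a few cases, even earlier releases). As a warm-up, here is a minimal self-contained sketch of the basic call itself; the values are invented purely for illustration:

import numpy as np
import tensorflow as tf

# Python lists, NumPy arrays, and scalars can all be converted.
a = tf.convert_to_tensor([[1, 2], [3, 4]])                          # dtype inferred as int32
b = tf.convert_to_tensor(np.random.randn(2, 3), dtype=tf.float32)   # explicit dtype
c = tf.convert_to_tensor(3.0, name="scalar_const")                  # optional op name

with tf.Session() as sess:
    print(sess.run([a, b, c]))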

Project: youtube-8m    Author: wangheda    | Project source | File source
def resize_axis(tensor, axis, new_size, fill_value=0):
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
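
A hypothetical call, only to show how resize_axis truncates or zero-pads along one axis (the shapes are invented, not taken from the youtube-8m code):

frames = tf.random_uniform([4, 17, 128])              # e.g. 4 videos, 17 frames, 128 features
clipped = resize_axis(frames, axis=1, new_size=10)    # truncated to 10 frames
padded = resize_axis(frames, axis=1, new_size=30)     # zero-padded up to 30 frames

with tf.Session() as sess:
    print(sess.run(tf.shape(clipped)))   # shape is (4, 10, 128)
    print(sess.run(tf.shape(padded)))    # shape is (4, 30, 128)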
Project: neurobind    Author: Kyubyong    | Project source | File source
def get_batch_data():
    # Load data
    X, Y = load_data()

    # calc total batch count
    num_batch = len(X) // hp.batch_size

    # Convert to tensor
    X = tf.convert_to_tensor(X, tf.int32)
    Y = tf.convert_to_tensor(Y, tf.float32)

    # Create Queues
    input_queues = tf.train.slice_input_producer([X, Y])

    # create batch queues
    x, y = tf.train.batch(input_queues,
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False)

    return x, y, num_batch  # (N, T), (N, T), ()
Project: distributional_perspective_on_RL    Author: Kiwoo    | Project source | File source
def lengths_to_mask(lengths_b, max_length):
    """
    Turns a vector of lengths into a boolean mask

    Args:
        lengths_b: an integer vector of lengths
        max_length: maximum length to fill the mask

    Returns:
        a boolean array of shape (batch_size, max_length)
        row[i] consists of True repeated lengths_b[i] times, followed by False
    """
    lengths_b = tf.convert_to_tensor(lengths_b)
    assert lengths_b.get_shape().ndims == 1
    mask_bt = tf.expand_dims(tf.range(max_length), 0) < tf.expand_dims(lengths_b, 1)
    return mask_bt
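
For example (lengths invented), a length vector [1, 3, 2] with max_length=4 gives:

lengths = tf.constant([1, 3, 2])
mask = lengths_to_mask(lengths, max_length=4)

with tf.Session() as sess:
    print(sess.run(mask))
    # [[ True False False False]
    #  [ True  True  True False]
    #  [ True  True False False]]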
Project: seq2seq    Author: google    | Project source | File source
def create_decoder(self, helper, mode):
    attention_fn = AttentionLayerDot(
        params={"num_units": self.attention_dim},
        mode=tf.contrib.learn.ModeKeys.TRAIN)
    attention_values = tf.convert_to_tensor(
        np.random.randn(self.batch_size, self.input_seq_len, 32),
        dtype=tf.float32)
    attention_keys = tf.convert_to_tensor(
        np.random.randn(self.batch_size, self.input_seq_len, 32),
        dtype=tf.float32)
    params = AttentionDecoder.default_params()
    params["max_decode_length"] = self.max_decode_length
    return AttentionDecoder(
        params=params,
        mode=mode,
        vocab_size=self.vocab_size,
        attention_keys=attention_keys,
        attention_values=attention_values,
        attention_values_length=np.arange(self.batch_size) + 1,
        attention_fn=attention_fn)
Project: seq2seq    Author: google    | Project source | File source
def setUp(self):
    super(BridgeTest, self).setUp()
    self.batch_size = 4
    self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
    self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
    final_encoder_state = nest.map_structure(
        lambda x: tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, x),
            dtype=tf.float32),
        self.encoder_cell.state_size)
    self.encoder_outputs = EncoderOutput(
        outputs=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values_length=np.full([self.batch_size], 10),
        final_state=final_encoder_state)
Project: seq2seq    Author: google    | Project source | File source
def _test_with_residuals(self, inputs, **kwargs):
    """Runs the cell in a session"""
    inputs = tf.convert_to_tensor(inputs)
    state = (tf.constant(np.random.randn(1, 2)),
             tf.constant(np.random.randn(1, 2)))

    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      test_cell = rnn_cell.ExtendedMultiRNNCell(
          [tf.contrib.rnn.GRUCell(2) for _ in range(2)],
          residual_connections=True,
          **kwargs)
      res_test = test_cell(inputs, state, scope="test")

    with self.test_session() as sess:
      sess.run([tf.global_variables_initializer()])
      return sess.run(res_test)
Project: hdrnet_legacy    Author: mgharbi    | Project source | File source
def run_bilateral_slice_apply(self, dev, grid_data, guide_data, input_data, has_offset=False):
    with tf.device(dev):

      grid_tensor = tf.convert_to_tensor(
          grid_data, name='grid', dtype=tf.float32)
      guide_tensor = tf.convert_to_tensor(
          guide_data, name='guide', dtype=tf.float32)
      input_tensor = tf.convert_to_tensor(
          input_data, name='input', dtype=tf.float32)

      output_tensor = ops.bilateral_slice_apply(grid_tensor, guide_tensor, input_tensor, has_offset=has_offset)

    with self.test_session() as sess:
      output_data = sess.run(output_tensor)

    return output_data
Project: CausalGAN    Author: mkocaoglu    | Project source | File source
def get_label_queue(self,batch_size):
        tf_labels = tf.convert_to_tensor(self.attr.values, dtype=tf.uint8)#0,1

        with tf.name_scope('label_queue'):
            uint_label=tf.train.slice_input_producer([tf_labels])[0]
        label=tf.to_float(uint_label)

        #All labels, not just those in causal_model
        dict_data={sl:tl for sl,tl in
                   zip(self.label_names,tf.split(label,len(self.label_names)))}


        num_preprocess_threads = max(self.num_worker-3,1)

        data_batch = tf.train.shuffle_batch(
                dict_data,
                batch_size=batch_size,
                num_threads=num_preprocess_threads,
                capacity=self.min_queue_examples + 3 * batch_size,
                min_after_dequeue=self.min_queue_examples,
                )

        return data_batch
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1Regularizer'):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer
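
Note that tf.op_scope and tf.mul predate TensorFlow 1.0 (they were later replaced by tf.name_scope and tf.multiply). A minimal sketch of the same weighted-regularizer closure written against the 1.x names, only to show how the returned function is applied; the variable and the weight value are arbitrary:

def l1_regularizer_v1x(weight=1.0, scope=None):
  def regularizer(tensor):
    with tf.name_scope(scope, 'L1Regularizer', [tensor]):
      w = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='weight')
      return tf.multiply(w, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer

weights = tf.get_variable('weights', shape=[10, 10])
reg_term = l1_regularizer_v1x(1e-4)(weights)   # scalar L1 penalty, added to the training loss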
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L2Regularizer'):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    the L1 loss op.
  """
  with tf.op_scope([tensor], scope, 'L1Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def l2_loss(tensor, weight=1.0, scope=None, normalize=False):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for op_scope.

  Returns:
    the L2 loss op.
  """
  with tf.op_scope([tensor], scope, 'L2Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    if normalize:
      loss = tf.sqrt( (tf.sqrt( tf.nn.l2_loss(tensor)) / tf.to_float(tf.size(tensor)))  , name='value')
    else:
      loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')

    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: Tensormodels    Author: asheshjain399    | Project source | File source
def sparse_cross_entropy_loss(logits, labels,
                       weight=1.0, scope=None):
  """Define a Cross Entropy loss using sparse_softmax_cross_entropy_with_logits.

  It can scale the loss by weight factor, and smooth the labels.

  Args:
    logits: [batch_size, num_classes] logits outputs of the network .
    labels: [batch_size,] target labels.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    A tensor with the softmax_cross_entropy loss.
  """
  with tf.op_scope([logits, labels], scope, 'SparseCrossEntropyLoss'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits,labels,name='xentropy')
    weight = tf.convert_to_tensor(weight,
                                    dtype=logits.dtype.base_dtype,
                                    name='loss_weight')

    loss = tf.mul(weight, tf.reduce_mean(cross_entropy), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: tf_classification    Author: visipedia    | Project source | File source
def _largest_size_at_most(height, width, largest_side):
  """Computes new shape with the largest side equal to `largest_side`.
  Computes new shape with the largest side equal to `largest_side` while
  preserving the original aspect ratio.
  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    largest_side: A python integer or scalar `Tensor` indicating the size of
      the largest side after resize.
  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: and int32 scalar tensor indicating the new width.
  """
  largest_side = tf.convert_to_tensor(largest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  largest_side = tf.to_float(largest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: largest_side / height,
                  lambda: largest_side / width)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
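
A quick numeric check with invented dimensions: a 480x640 image whose largest side is capped at 256 keeps its aspect ratio:

new_h, new_w = _largest_size_at_most(height=tf.constant(480),
                                     width=tf.constant(640),
                                     largest_side=256)
with tf.Session() as sess:
    print(sess.run([new_h, new_w]))   # [192, 256], both sides scaled by 256/640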
Project: tf-sr-zoo    Author: MLJejuCamp2017    | Project source | File source
def imgread(img_path, scale = 4):
    img = scipy.misc.imread(img_path)
    img = img /256.0
    h,w,c = img.shape
    tmp1 = h % scale
    new_h = h + scale - tmp1
    tmp2 = w % scale
    new_w = w +scale-tmp2
    img = np.pad(img, ((0,scale-tmp1), (0, scale-tmp2),(0,0)), mode = 'reflect')
    if scale != None:
        img = np.expand_dims(img,0)
        img = tf.convert_to_tensor(img)
        lr_w = new_w / scale
        lr_h = new_h /scale
        img = tf.cast(img, tf.float32)
        img_lr = tf.image.resize_images(img, [lr_h, lr_w])
        img_lr = tf.cast(img_lr,tf.float32)
        return img_lr, img
    return img
Project: tf-sr-zoo    Author: MLJejuCamp2017    | Project source | File source
def imgread(img_path, scale = 4):
    img = scipy.misc.imread(img_path)
    #img = scipy.misc.imresize(img, (128, 128))
    img = img /256.0
    h,w,c = img.shape
    new_h = pow(2, int(math.log(h, 2))+1)
    tmp1 = new_h - h 
    new_w = pow(2, int(math.log(w, 2))+1)
    tmp2 = new_w - w
    img = np.pad(img, ((0,tmp1), (0, tmp2),(0,0)), mode = 'constant')
    if scale != None:
        img = np.expand_dims(img,0)
        img = tf.convert_to_tensor(img)
        lr_w = new_w / scale
        lr_h = new_h /scale
        img = tf.cast(img, tf.float32)
        img_lr = tf.image.resize_images(img, [lr_h, lr_w])
        img_lr = tf.cast(img_lr,tf.float32)
        return img_lr, img
    return img
Project: neural_tokenizer    Author: Kyubyong    | Project source | File source
def get_batch_data():
    # Load data
    X, Y = load_data()

    # calc total batch count
    num_batch = len(X) // hp.batch_size

    # Convert to tensor
    X = tf.convert_to_tensor(X, tf.int32)
    Y = tf.convert_to_tensor(Y, tf.int32)

    # Create Queues
    input_queues = tf.train.slice_input_producer([X, Y])

    # create batch queues
    x, y = tf.train.batch(input_queues,
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False)

    return x, y, num_batch  # (N, T), (N, T), ()
Project: AssociativeRetrieval    Author: jxwufan    | Project source | File source
def _fwlinear(self, args, output_size, scope=None):
    if args is None or (nest.is_sequence(args) and not args):
      raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
      args = [args]
    assert len(args) == 2
    assert args[0].get_shape().as_list()[1] == output_size

    dtype = [a.dtype for a in args][0]

    with vs.variable_scope(scope or "Linear"):
      matrixW = vs.get_variable(
        "MatrixW", dtype=dtype, initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))

      matrixC = vs.get_variable(
        "MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)

      res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
      return res
Project: isbi2017-part3    Author: learningtitans    | Project source | File source
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                           align_corners=False)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
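
The companion helper _smallest_size_at_least is not included in this excerpt; a sketch of it, following the standard TF-Slim preprocessing code this function is based on (scale the image so that its smaller side equals smallest_side):

def _smallest_size_at_least(height, width, smallest_side):
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)
  # Divide by the smaller of the two sides so that side ends up equal to smallest_side.
  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width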
Project: seq2seq    Author: eske    | Project source | File source
def batch_gather(tensor, indices):
    """Gather in batch from a tensor of arbitrary size.

    In pseudocode this module will produce the following:
    output[i] = tf.gather(tensor[i], indices[i])

    Args:
      tensor: Tensor of arbitrary size.
      indices: Vector of indices.
    Returns:
      output: A tensor of gathered values.
    """
    shape = get_shape(tensor)
    flat_first = tf.reshape(tensor, [shape[0] * shape[1]] + shape[2:])
    indices = tf.convert_to_tensor(indices)
    offset_shape = [shape[0]] + [1] * (indices.shape.ndims - 1)
    offset = tf.reshape(tf.range(shape[0]) * shape[1], offset_shape)
    output = tf.gather(flat_first, indices + offset)
    return output
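
batch_gather relies on a project-local get_shape helper that is not shown here. The flatten-plus-offset trick it implements can be sketched on its own with toy values:

import numpy as np

tensor = tf.constant(np.arange(12).reshape(3, 4))   # rows [0..3], [4..7], [8..11]
indices = tf.constant([0, 2, 3])                    # one column index per row

batch = tf.shape(tensor)[0]
width = tf.shape(tensor)[1]
flat = tf.reshape(tensor, [-1])                     # flatten to a single vector
offset = tf.range(batch) * width                    # start of each row inside `flat`
gathered = tf.gather(flat, indices + offset)        # gathered[i] == tensor[i, indices[i]]

with tf.Session() as sess:
    print(sess.run(gathered))   # gathers the values 0, 6 and 11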
Project: zhusuan    Author: thu-ml    | Project source | File source
def test_objective(self):
        log_qx = stats.norm.logpdf(self._n01_samples).astype(np.float32)
        qx_samples = tf.convert_to_tensor(self._n01_samples)
        log_qx = tf.convert_to_tensor(log_qx)

        def _check_elbo(x_mean, x_std):
            # check their elbo
            def log_joint(observed):
                norm = Normal(mean=x_mean, std=x_std)
                return norm.log_prob(observed['x'])

            lower_bound = elbo(log_joint, observed={},
                               latent={'x': [qx_samples, log_qx]}, axis=0)
            analytic_lower_bound = -_kl_normal_normal(0., 1., x_mean, x_std)
            with self.test_session(use_gpu=True) as sess:
                a = sess.run(lower_bound)
                b = sess.run(analytic_lower_bound)
                # print(a, b)
                self.assertNear(a, b, 1e-2)

        _check_elbo(0., 1.)
        _check_elbo(2., 3.)
Project: zhusuan    Author: thu-ml    | Project source | File source
def test_objective(self):
        log_qx = stats.norm.logpdf(self._n01_samples).astype(np.float32)
        qx_samples = tf.convert_to_tensor(self._n01_samples)
        log_qx = tf.convert_to_tensor(log_qx)

        def log_joint(observed):
            norm = Normal(std=1.)
            return norm.log_prob(observed['x'])

        lower_bound = klpq(log_joint, observed={},
                           latent={'x': [qx_samples, log_qx]}, axis=0)
        err_msg = "can only be optimized instead of being evaluated"
        with self.assertRaisesRegexp(NotImplementedError, err_msg):
            _ = lower_bound + 1.
        with self.test_session(use_gpu=True) as sess:
            with self.assertRaisesRegexp(NotImplementedError, err_msg):
                sess.run(lower_bound)
Project: zhusuan    Author: thu-ml    | Project source | File source
def __init__(self, name, distribution, n_samples, observed=None):
        self._name = name
        self._distribution = distribution
        self._n_samples = n_samples
        self._dtype = distribution.dtype
        if observed is not None:
            try:
                observed = tf.convert_to_tensor(observed, dtype=self.dtype)
            except ValueError as e:
                raise ValueError(
                    "StochasticTensor('{}') not compatible "
                    "with its observed value. Error message: {}".format(
                        self._name, e))
        self._observed = observed
        try:
            self._net = BayesianNet.get_context()
            self._net._add_stochastic_tensor(self)
        except RuntimeError:
            self._net = None
        super(StochasticTensor, self).__init__()
Project: zhusuan    Author: thu-ml    | Project source | File source
def tensor(self):
        """
        Return corresponding Tensor through sampling, or if observed, return
        the observed value.

        :return: A Tensor.
        """
        if not hasattr(self, '_tensor'):
            if self._observed is not None:
                self._tensor = self._observed
            elif self._name in self._net.observed:
                try:
                    self._tensor = tf.convert_to_tensor(
                        self._net.observed[self._name], dtype=self._dtype)
                except ValueError as e:
                    raise ValueError(
                        "StochasticTensor('{}') not compatible "
                        "with its observed value. Error message: {}".format(
                            self._name, e))
            else:
                self._tensor = self.sample(self._n_samples)
        return self._tensor
Project: zhusuan    Author: thu-ml    | Project source | File source
def __init__(self, initial_stepsize, adapt_step_size, gamma, t0, kappa,
                 delta):
        with tf.name_scope("StepsizeTuner"):
            self.adapt_step_size = tf.convert_to_tensor(
                adapt_step_size, dtype=tf.bool, name="adapt_step_size")
            self.initial_stepsize = initial_stepsize

            self.gamma = tf.convert_to_tensor(gamma, dtype=tf.float32,
                                              name="gamma")
            self.t0 = tf.convert_to_tensor(t0, dtype=tf.float32, name="t0")
            self.kappa = tf.convert_to_tensor(kappa, dtype=tf.float32,
                                              name="kappa")
            self.delta = tf.convert_to_tensor(delta, dtype=tf.float32,
                                              name="delta")
            self.mu = tf.constant(10 * initial_stepsize, dtype=tf.float32,
                                  name="mu")

            self.step = tf.Variable(0.0, dtype=tf.float32,
                                    name="step", trainable=False)
            self.log_epsilon_bar = tf.Variable(
                0.0, dtype=tf.float32, name="log_epsilon_bar", trainable=False)
            self.h_bar = tf.Variable(0.0, dtype=tf.float32,
                                     name="h_bar", trainable=False)
Project: zhusuan    Author: thu-ml    | Project source | File source
def __init__(self, logits, dtype=None, group_ndims=0, **kwargs):
        self._logits = tf.convert_to_tensor(logits)
        param_dtype = assert_same_float_dtype(
            [(self._logits, 'Bernoulli.logits')])

        if dtype is None:
            dtype = tf.int32
        assert_same_float_and_int_dtype([], dtype)

        super(Bernoulli, self).__init__(
            dtype=dtype,
            param_dtype=param_dtype,
            is_continuous=False,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs)
Project: zhusuan    Author: thu-ml    | Project source | File source
def __init__(self, logits, dtype=None, group_ndims=0, **kwargs):
        self._logits = tf.convert_to_tensor(logits)
        param_dtype = assert_same_float_dtype(
            [(self._logits, 'Categorical.logits')])

        if dtype is None:
            dtype = tf.int32
        assert_same_float_and_int_dtype([], dtype)

        self._logits = assert_rank_at_least_one(
                self._logits, 'Categorical.logits')
        self._n_categories = get_shape_at(self._logits, -1)

        super(Categorical, self).__init__(
            dtype=dtype,
            param_dtype=param_dtype,
            is_continuous=False,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs)
Project: zhusuan    Author: thu-ml    | Project source | File source
def __init__(self,
                 rate,
                 dtype=None,
                 group_ndims=0,
                 check_numerics=False,
                 **kwargs):
        self._rate = tf.convert_to_tensor(rate)
        param_dtype = assert_same_float_dtype(
            [(self._rate, 'Poisson.rate')])

        if dtype is None:
            dtype = tf.int32
        assert_same_float_and_int_dtype([], dtype)

        self._check_numerics = check_numerics

        super(Poisson, self).__init__(
            dtype=dtype,
            param_dtype=param_dtype,
            is_continuous=False,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs)
Project: zhusuan    Author: thu-ml    | Project source | File source
def __init__(self, logits, dtype=None, group_ndims=0, **kwargs):
        self._logits = tf.convert_to_tensor(logits)
        param_dtype = assert_same_float_dtype(
            [(self._logits, 'OnehotCategorical.logits')])

        if dtype is None:
            dtype = tf.int32
        assert_same_float_and_int_dtype([], dtype)

        self._logits = assert_rank_at_least_one(
            self._logits, 'OnehotCategorical.logits')
        self._n_categories = get_shape_at(self._logits, -1)

        super(OnehotCategorical, self).__init__(
            dtype=dtype,
            param_dtype=param_dtype,
            is_continuous=False,
            is_reparameterized=False,
            group_ndims=group_ndims,
            **kwargs)
Project: zhusuan    Author: thu-ml    | Project source | File source
def _check_input_shape(self, given):
        given = tf.convert_to_tensor(given, dtype=self.dtype)

        err_msg = "The given argument should be able to broadcast to " \
                  "match batch_shape + value_shape of the distribution."
        if (given.get_shape() and self.get_batch_shape() and
                self.get_value_shape()):
            static_sample_shape = tf.TensorShape(
                self.get_batch_shape().as_list() +
                self.get_value_shape().as_list())
            try:
                tf.broadcast_static_shape(given.get_shape(),
                                          static_sample_shape)
            except ValueError:
                raise ValueError(
                    err_msg + " ({} vs. {} + {})".format(
                        given.get_shape(), self.get_batch_shape(),
                        self.get_value_shape()))
        return given
Project: zhusuan    Author: thu-ml    | Project source | File source
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` has the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # There is a BUG of Tensorflow for not doing static shape inference
    # right in nested tf.cond()'s, so we are not comparing x and y's
    # shape directly but working with their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
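
A usage sketch (the placeholder and the fed shapes are invented for illustration):

import numpy as np

x = tf.placeholder(tf.float32, shape=None)   # shape only known at run time
y = tf.zeros([2, 3])
same = is_same_dynamic_shape(x, y)

with tf.Session() as sess:
    print(sess.run(same, feed_dict={x: np.ones((2, 3), np.float32)}))   # True
    print(sess.run(same, feed_dict={x: np.ones((4,), np.float32)}))     # False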
Project: tensorflow-deeplab-lfov    Author: DrSleep    | Project source | File source
def __init__(self, data_dir, data_list, input_size, random_scale, coord):
        '''Initialise an ImageReader.

        Args:
          data_dir: path to the directory with images and masks.
          data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
          input_size: a tuple with (height, width) values, to which all the images will be resized.
          random_scale: whether to randomly scale the images prior to random crop.
          coord: TensorFlow queue coordinator.
        '''
        self.data_dir = data_dir
        self.data_list = data_list
        self.input_size = input_size
        self.coord = coord

        self.image_list, self.label_list = read_labeled_image_list(self.data_dir, self.data_list)
        self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string)
        self.labels = tf.convert_to_tensor(self.label_list, dtype=tf.string)
        self.queue = tf.train.slice_input_producer([self.images, self.labels],
                                                   shuffle=input_size is not None) # Not shuffling if it is val.
        self.image, self.label = read_images_from_disk(self.queue, self.input_size, random_scale)
Project: tflearn    Author: tflearn    | Project source | File source
def test_weak_cross_entropy_2d(self):
        """
        Test tflearn.objectives.weak_cross_entropy_2d
        """
        num_classes = 2
        batch_size = 3
        height, width = 5, 5
        shape = (batch_size, height, width, num_classes)
        y_pred = np.random.random(shape).astype(np.float32)
        target = np.random.randint(0, num_classes, np.prod(shape[:-1]))
        # convert to one-hot encoding
        y_true = np.eye(num_classes)[target].reshape(shape)

        with tf.Graph().as_default():
            y_pred = tf.convert_to_tensor(y_pred)
            y_true = tf.convert_to_tensor(y_true)

            loss = tflearn.objectives.weak_cross_entropy_2d(y_pred, y_true)

            with tf.Session() as sess:
                res = sess.run(loss)

        self.assertGreater(res, 0.)
        self.assertLess(res, 1.)
Project: sonnet    Author: deepmind    | Project source | File source
def testComputation(self):
    # Initialize each embedding to its index. Thus, the lookup ids are the same
    # as the embeddings themselves.
    initializers = {"embeddings": tf.constant_initializer(
        [[0], [1], [2], [3], [4], [5], [6]], dtype=tf.float32)}
    embed_mod = snt.Embed(
        vocab_size=self._vocab_size,
        embed_dim=self._embed_dim,
        initializers=initializers)
    embeddings = embed_mod(tf.convert_to_tensor(self._ids))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      embeddings_ = sess.run(embeddings)
      expected_embeddings = np.reshape(
          self._ids, newshape=list(self._ids.shape) + [self._embed_dim])
      self.assertAllClose(embeddings_, expected_embeddings)
Project: sonnet    Author: deepmind    | Project source | File source
def testPartitioners(self):
    # Partition embeddings such that there's one variable per vocabulary entry.
    partitioners = {"embeddings": tf.variable_axis_size_partitioner(
        4 * self._embed_dim)}
    embed_mod = snt.Embed(
        vocab_size=self._vocab_size,
        embed_dim=self._embed_dim,
        partitioners=partitioners)
    embeddings = embed_mod(tf.convert_to_tensor(self._ids))
    self.assertEqual(type(embed_mod.embeddings), variables.PartitionedVariable)
    self.assertEqual(len(embed_mod.embeddings), self._vocab_size)

    # Ensure that tf.nn.embedding_lookup() plays nicely with embedding
    # variables.
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(embeddings)
Project: sonnet    Author: deepmind    | Project source | File source
def testLargeComputation(self, num_output_classes):
    self.setUpWithNumOutputClasses(
        num_output_classes, depth=3 * num_output_classes)
    self.setUpWithNumOutputClasses(num_output_classes)
    module = snt.nets.Dilation(
        num_output_classes=num_output_classes, model_size="large")
    x = module(tf.convert_to_tensor(self._images))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      x_ = sess.run(x)

      # Default initialization produces something like an operator, but the
      # number of channels differs. However, summing across channels should
      # recover a near-identical magnitude per-pixel.
      self.assertAllClose(
          np.sum(x_, axis=3), np.sum(self._images, axis=3), atol=1e-3)
Project: sonnet    Author: deepmind    | Project source | File source
def testInvalidModelSize(self):
    self.setUpWithNumOutputClasses(1)
    module = snt.nets.Dilation(
        num_output_classes=self._num_output_classes,
        model_size="invalid_model_size")

    with self.assertRaisesRegexp(ValueError, "Unrecognized model_size"):
      module(tf.convert_to_tensor(self._images))

    # The other check for model_size being valid is only reached when
    # weight initializers are provided. We need to test this as well to get
    # 100% test coverage.
    module = snt.nets.Dilation(
        num_output_classes=self._num_output_classes,
        initializers={"w": snt.nets.noisy_identity_kernel_initializer(1)},
        model_size="invalid_model_size")
    with self.assertRaisesRegexp(ValueError, "Unrecognized model_size"):
      module(tf.convert_to_tensor(self._images))
Project: sonnet    Author: deepmind    | Project source | File source
def testNoMemorySlotsLeft(self):
    # Every example must have at least one unmasked memory slot for attention
    # to work.
    memory_mask = tf.convert_to_tensor(
        [
            [True, True, True, True],
            [True, True, True, False],
            [False, False, False, False],
        ],
        dtype=tf.bool)
    attention_output = self._attention_mod(
        self._memory, self._query, memory_mask=memory_mask)
    x = attention_output.read
    with self.test_session() as sess:
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(x)
Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def create_decoder(self, helper, mode):
    attention_fn = AttentionLayerDot(
        params={"num_units": self.attention_dim},
        mode=tf.contrib.learn.ModeKeys.TRAIN)
    attention_values = tf.convert_to_tensor(
        np.random.randn(self.batch_size, self.input_seq_len, 32),
        dtype=tf.float32)
    attention_keys = tf.convert_to_tensor(
        np.random.randn(self.batch_size, self.input_seq_len, 32),
        dtype=tf.float32)
    params = AttentionDecoder.default_params()
    params["max_decode_length"] = self.max_decode_length
    return AttentionDecoder(
        params=params,
        mode=mode,
        vocab_size=self.vocab_size,
        attention_keys=attention_keys,
        attention_values=attention_values,
        attention_values_length=np.arange(self.batch_size) + 1,
        attention_fn=attention_fn)
Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def setUp(self):
    super(BridgeTest, self).setUp()
    self.batch_size = 4
    self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
    self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
    final_encoder_state = nest.map_structure(
        lambda x: tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, x),
            dtype=tf.float32),
        self.encoder_cell.state_size)
    self.encoder_outputs = EncoderOutput(
        outputs=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values_length=np.full([self.batch_size], 10),
        final_state=final_encoder_state)
Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def _test_with_residuals(self, inputs, **kwargs):
    """Runs the cell in a session"""
    inputs = tf.convert_to_tensor(inputs)
    state = (tf.constant(np.random.randn(1, 2)),
             tf.constant(np.random.randn(1, 2)))

    with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
      test_cell = rnn_cell.ExtendedMultiRNNCell(
          [tf.contrib.rnn.GRUCell(2) for _ in range(2)],
          residual_connections=True,
          **kwargs)
      res_test = test_cell(inputs, state, scope="test")

    with self.test_session() as sess:
      sess.run([tf.global_variables_initializer()])
      return sess.run(res_test)
Project: spoofnet-tensorflow    Author: yomna-safaa    | Project source | File source
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                           align_corners=False)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
Project: spoofnet-tensorflow    Author: yomna-safaa    | Project source | File source
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                           align_corners=False)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
Project: supic    Author: Hirico    | Project source | File source
def pil_single_test_SET5(path):

    a,b,c = argument_sr.options.get_set5(path)

    lrs = array(a)
    hr2s = array(b)
    hr4s = array(c)
    lrs = tf.convert_to_tensor(lrs, dtype=tf.float32)
    hr2s = tf.convert_to_tensor(hr2s, dtype=tf.float32)
    hr4s = tf.convert_to_tensor(hr4s, dtype=tf.float32)

    lrs = tf.expand_dims(lrs, 0)
    hr2s = tf.expand_dims(hr2s, 0)
    hr4s = tf.expand_dims(hr4s, 0)
    lrs = tf.expand_dims(lrs, 3)
    hr2s = tf.expand_dims(hr2s, 3)
    hr4s = tf.expand_dims(hr4s, 3)
    return lrs, hr2s, hr4s, None
Project: piecewisecrf    Author: Vaan5    | Project source | File source
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1Regularizer'):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer
Project: piecewisecrf    Author: Vaan5    | Project source | File source
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L2Regularizer'):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer
Project: piecewisecrf    Author: Vaan5    | Project source | File source
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer
Project: piecewisecrf    Author: Vaan5    | Project source | File source
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    the L1 loss op.
  """
  with tf.op_scope([tensor], scope, 'L1Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss
Project: piecewisecrf    Author: Vaan5    | Project source | File source
def l2_loss(tensor, weight=1.0, scope=None):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for op_scope.

  Returns:
    the L2 loss op.
  """
  with tf.op_scope([tensor], scope, 'L2Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss