Python tensorflow module: fill() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.fill().
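
Before the project examples, here is a minimal sketch of the basic call, written in the TF 1.x style the snippets below use; the shapes and values are illustrative:

import tensorflow as tf

# tf.fill(dims, value) builds a tensor of shape `dims` whose every element
# equals the scalar `value`; the dtype is inferred from `value`. Unlike
# tf.constant, `dims` may be a dynamic tensor such as tf.shape(x), which is
# the pattern most of the examples below rely on.
weights = tf.fill([2, 3], 0.5)  # 2x3 float32 tensor, all entries 0.5
pad_ids = tf.fill([4], 0)       # length-4 int32 vector of zeros

with tf.Session() as sess:
    print(sess.run(weights))
    print(sess.run(pad_ids))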

Project: youtube-8m    Author: wangheda
def create_model(self, model_input, vocab_size, num_frames, l2_penalty=1e-8, **unused_params):
    """
    A super model that combines one or more models.
    """
    models = FLAGS.wide_and_deep_models
    outputs = []
    for model_name in map(lambda x: x.strip(), models.split(",")):
      model = getattr(frame_level_models, model_name, None)()
      output = model.create_model(model_input, vocab_size, num_frames, l2_penalty=l2_penalty, **unused_params)["predictions"]
      outputs.append(tf.expand_dims(output, axis=2))
    num_models = len(outputs)
    model_outputs = tf.concat(outputs, axis=2)
#    linear_combination = tf.get_variable("combine", shape=[vocab_size,num_models],
#        dtype=tf.float32, initializer=tf.zeros_initializer(),
#        regularizer=slim.l2_regularizer(l2_penalty))
#    combination = tf.nn.softmax(linear_combination)
    combination = tf.fill(dims=[vocab_size,num_models], value=1.0/num_models)
    output_sum = tf.einsum("ijk,jk->ij", model_outputs, combination)
    return {"predictions": output_sum}
Project: youtube-8m    Author: wangheda
def resize_axis(tensor, axis, new_size, fill_value=0):
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
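
A small usage sketch for the resize_axis helper above; the expected outputs are worked out by hand:

import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])

padded = resize_axis(t, axis=1, new_size=5, fill_value=-1)  # pad columns with -1
truncated = resize_axis(t, axis=1, new_size=2)              # keep first 2 columns

with tf.Session() as sess:
    print(sess.run(padded))     # [[ 1  2  3 -1 -1] [ 4  5  6 -1 -1]]
    print(sess.run(truncated))  # [[1 2] [4 5]]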
Project: latplan    Author: guicho271828
def tensor_swirl(image, center=None, strength=1, radius=100, rotation=0, cval=0.0, **kwargs):
    # **kwargs is for unsupported options (ignored)
    cval = tf.fill(K.shape(image)[0:1], cval)
    shape = K.int_shape(image)[1:3]
    if center is None:
        center = np.array(shape) / 2
    ys = np.expand_dims(np.repeat(np.arange(shape[0]), shape[1]),-1)
    xs = np.expand_dims(np.tile  (np.arange(shape[1]), shape[0]),-1)
    map_xs, map_ys = swirl_mapping(xs, ys, center, rotation, strength, radius)

    mapping = np.zeros((*shape, *shape))
    for map_x, map_y, x, y in zip(map_xs, map_ys, xs, ys):
        results = tensor_linear_interpolation(image, map_x, map_y, cval)
        for _y, _x, w in results:
            # mapping[int(y),int(x),int(_y),int(_x),] = w
            mapping[int(_y),int(_x),int(y),int(x),] = w


    results = tf.tensordot(image, K.variable(mapping), [[1,2],[0,1]])
    # results = K.reshape(results, K.shape(image))
    return results
Project: conv_seq2seq    Author: tobyyouup
def initialize(self, name=None):

    finished = tf.tile([False], [self.config.beam_width])

    start_tokens_batch = tf.fill([self.config.beam_width], self.start_tokens)
    first_inputs = tf.nn.embedding_lookup(self.target_embedding, start_tokens_batch)
    first_inputs = tf.expand_dims(first_inputs, 1)
    zeros_padding = tf.zeros([self.config.beam_width, self.params['max_decode_length']-1, self.target_embedding.get_shape().as_list()[-1]])
    first_inputs = tf.concat([first_inputs, zeros_padding], axis=1)

    outputs = tf.tile(self.initial_state.outputs, [self.config.beam_width,1,1]) 
    attention_values = tf.tile(self.initial_state.attention_values, [self.config.beam_width,1,1]) 
    enc_output = EncoderOutput(
        outputs=outputs,
        final_state=self.initial_state.final_state,
        attention_values=attention_values,
        attention_values_length=self.initial_state.attention_values_length)


    return finished, first_inputs, enc_output
Project: conv_seq2seq    Author: tobyyouup
def initialize(self, name=None):

    finished = tf.tile([False], [self.config.beam_width])

    start_tokens_batch = tf.fill([self.config.beam_width], self.start_tokens)
    first_inputs = tf.nn.embedding_lookup(self.target_embedding, start_tokens_batch)
    first_inputs = tf.expand_dims(first_inputs, 1)
    zeros_padding = tf.zeros([self.config.beam_width, self.params['max_decode_length']-1, self.target_embedding.get_shape().as_list()[-1]])
    first_inputs = tf.concat([first_inputs, zeros_padding], axis=1)
    beam_state = beam_search.create_initial_beam_state(self.config)    

    outputs = tf.tile(self.initial_state.outputs, [self.config.beam_width,1,1]) 
    attention_values = tf.tile(self.initial_state.attention_values, [self.config.beam_width,1,1]) 
    enc_output = EncoderOutput(
        outputs=outputs,
        final_state=self.initial_state.final_state,
        attention_values=attention_values,
        attention_values_length=self.initial_state.attention_values_length)


    return finished, first_inputs, (enc_output, beam_state)
Project: tf.rasterizer    Author: vahidk
def clear_fn(self):
        color = tf.placeholder(tf.float32, [3], name="ph_color")
        depth = tf.placeholder(tf.float32, [], name="ph_depth")
        packed_color = utils.pack_colors(color, 0)
        tiled_color = tf.fill([self.height, self.width], packed_color)
        tiled_depth = tf.fill([self.height, self.width], depth)
        assign_color = tf.assign(self.color, tiled_color)
        assign_depth = tf.assign(self.depth, tiled_depth)
        self.commands.append(assign_color)
        self.commands.append(assign_depth)

        def _clear(color_val=[0., 0., 0.], depth_val=FLT_MIN):
            self.args[color] = color_val
            self.args[depth] = depth_val

        return _clear
Project: Skeleton-key    Author: feiyu1990
def build_inference(self):
        embed = self.embedding
        context = self.context
        hidden = self.hidden
        features = self._cnn_encoding(embedding=embed, context=context, hidden=hidden)
        c = tf.zeros([tf.shape(self.context)[0], self.H])
        h = tf.zeros([tf.shape(self.context)[0], self.H])
        (self.init_c, self.init_h) = self._lstm(h, c, features, reuse=False)
        _ = self._decode_lstm(self.init_h)
        _ = self._word_embedding(inputs=tf.fill([tf.shape(features)[0]], self._start))

        self.in_word = tf.placeholder(tf.int32, [None])
        x = self._word_embedding(inputs=self.in_word, reuse=True)

        self.c_feed = tf.placeholder(tf.float32, [None, self.H])
        self.h_feed = tf.placeholder(tf.float32, [None, self.H])
        (self.c, self.h) = self._lstm(self.h_feed, self.c_feed, x, reuse=True)
        self.log_softmax = self._decode_lstm(self.h, reuse=True)
Project: discoGAN.tensorflow.slim    Author: ilguyi
def GANLoss(logits, is_real=True, smoothing=0.9, name=None):
  """Computes standard GAN loss between `logits` and `labels`.

  Args:
    logits: A float32 Tensor of logits.
    is_real: boolean, True means `1` labeling, False means `0` labeling.
    smoothing: one side label smoothing.

  Returns:
    A scalar Tensor representing the loss value.
  """
  if is_real:
    # one side label smoothing
    labels = tf.fill(logits.get_shape(), smoothing)
  else:
    labels = tf.zeros_like(logits)

  with ops.name_scope(name, 'GAN_loss', [logits, labels]) as name:
    loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                                labels=labels,
                                logits=logits))

    return loss
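
Here tf.fill broadcasts the smoothed target 0.9 over the logits' statically known shape (tf.fill(logits.get_shape(), ...) requires that shape to be fully defined). A usage sketch, assuming the GANLoss function above is in scope together with its module's `from tensorflow.python.framework import ops` import:

import tensorflow as tf

logits = tf.random_normal([8, 1])             # stand-in discriminator outputs
d_loss_real = GANLoss(logits, is_real=True)   # targets: tf.fill(..., 0.9)
d_loss_fake = GANLoss(logits, is_real=False)  # targets: tf.zeros_like(logits)

with tf.Session() as sess:
    print(sess.run([d_loss_real, d_loss_fake]))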
Project: fathom    Author: rdadolf
def ctc_label_dense_to_sparse( self, labels, label_lengths ):
    """Mike Henry's implementation, with some minor modifications."""
    with self.G.as_default():
      label_shape = tf.shape( labels )
      num_batches_tns = tf.stack( [label_shape[0]] )
      max_num_labels_tns = tf.stack( [label_shape[1]] )

      def range_less_than(previous_state, current_input):
        return tf.expand_dims( tf.range( label_shape[1] ), 0 ) < current_input

      init = tf.cast( tf.fill( max_num_labels_tns, 0 ), tf.bool )
      init = tf.expand_dims( init, 0 )
      dense_mask = functional_ops.scan(range_less_than, label_lengths , initializer=init, parallel_iterations=1)
      dense_mask = dense_mask[ :, 0, : ]

      label_array = tf.reshape( tf.tile( tf.range( 0, label_shape[1] ), num_batches_tns ), label_shape )
      label_ind = tf.boolean_mask( label_array, dense_mask )

      batch_array = tf.transpose( tf.reshape( tf.tile( tf.range( 0,  label_shape[0] ), max_num_labels_tns ), tf.reverse( label_shape,[0]) ) )
      batch_ind = tf.boolean_mask( batch_array, dense_mask )

      indices = tf.transpose( tf.reshape( tf.concat( axis=0, values=[batch_ind, label_ind] ), [2,-1] ) )
      vals_sparse = tf.gather_nd( labels, indices )
      return tf.SparseTensor( tf.to_int64(indices), vals_sparse, tf.to_int64( label_shape ) )
Project: document-qa    Author: allenai
def apply(self, is_train, x, mask=None):
        if self.map_layer is not None:
            x = self.map_layer.apply(is_train, x, mask)

        rank = len(x.shape) - 2
        if mask is not None:
            shape = tf.shape(x)
            mask = tf.sequence_mask(tf.reshape(mask, (-1,)), shape[-2])
            mask = tf.cast(tf.reshape(mask, (shape[0], shape[1], shape[2], 1)), tf.float32)
            # this min_val thing is kind of a hack, really we should do something like compute the
            # min val over the entire batch, or maybe just pick a very negative value, or maybe
            # do something a bit more finicky with tf.boolean_mask
            # In practice it doesn't seem to be a problem, and some of the earlier models used this
            # scheme so I have been sticking with it.
            if self.min_val == 0:
                x *= mask
            else:
                x = x * mask + self.min_val * (1 - mask)
            return tf.maximum(tf.reduce_max(x, axis=rank), tf.fill([1] * (len(x.shape)-1),
                                                                   float(self.min_val)))
        else:
            return tf.reduce_max(x, axis=rank)
Project: tensorprob    Author: tensorprob
def Uniform(name=None):
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        return tf.cond(
            tf.logical_or(
                tf.is_inf(tf.cast(lower, config.dtype)),
                tf.is_inf(tf.cast(upper, config.dtype))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )

    Distribution.integral = integral

    return X
Project: tensorprob    Author: tensorprob
def UniformInt(name=None):
    X = tf.placeholder(config.int_dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        val = tf.cond(
            tf.logical_or(
                tf.is_inf(tf.ceil(tf.cast(lower, config.dtype))),
                tf.is_inf(tf.floor(tf.cast(upper, config.dtype)))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )
        return val

    Distribution.integral = integral

    return X
Project: DDPG    Author: MOCR
def __init__(self, lin, lout, iniRange, graph= None):

        if graph!=None:
            with graph.as_default():

                self.v = tf.Variable(tf.random_uniform([lin, lout], iniRange[0], iniRange[1]))
                self.g = tf.Variable(tf.random_uniform([lout], -1.0,1.0))
                self.pow2 = tf.fill([lin, lout],2.0)
                self.v_norm = tf.sqrt(tf.reduce_sum(tf.pow(self.v, self.pow2),0))
                self.tile_div = tf.tile(tf.expand_dims(tf.div(self.g, self.v_norm),0),[lin, 1])
                self.w = tf.mul(self.tile_div, self.v)
        else:
            self.v = tf.Variable(tf.random_uniform([lin, lout], -1/math.sqrt(lin), 1/math.sqrt(lin)))
            self.g = tf.Variable(tf.random_uniform([lout], -1.0,1.0))
            self.pow2 = tf.fill([lin, lout],2.0)
            self.v_norm = tf.sqrt(tf.reduce_sum(tf.pow(self.v, self.pow2),0))
            self.tile_div = tf.tile(tf.expand_dims(tf.div(self.g, self.v_norm),0),[lin, 1])
            self.w = tf.mul(self.tile_div, self.v)
Project: tf-seq2seq-attn    Author: johncf
def infer(self, output_maxlen=128):
        """Build model for inference.
        """
        self.input_data = tf.placeholder(tf.int32, [1, None], name='input_data')
        self.input_lengths = None

        def infer_helper():
            return seq2seq.GreedyEmbeddingHelper(
                    self._output_onehot,
                    start_tokens=tf.fill([1], self._output_sos_id),
                    end_token=self._output_eos_id)

        self._build_model(1, infer_helper, decoder_maxiters=output_maxlen, alignment_history=True)

# Also See
#   https://groups.google.com/a/tensorflow.org/forum/#!topic/discuss/dw3Y2lnMAJc
Project: deep-learning-essentials    Author: DominicBreuker
def call(self, x, mask=None):
        X = x
        half_n = self.n // 2
        input_sqr = K.square(X)
        if K._BACKEND == 'theano':
            b, ch, r, c = X.shape
            extra_channels = T.alloc(0., b, ch + 2*half_n, r, c)
            input_sqr = T.set_subtensor(
                extra_channels[:, half_n:half_n+ch, :, :], input_sqr)
        elif K._BACKEND == 'tensorflow':
            b, ch, r, c = K.int_shape(X)
            up_dims = tf.pack([tf.shape(X)[0], half_n, r, c])
            up = tf.fill(up_dims, 0.0)
            middle = input_sqr
            down_dims = tf.pack([tf.shape(X)[0], half_n, r, c])
            down = tf.fill(down_dims, 0.0)
            input_sqr = K.concatenate([up, middle, down], axis=1)
        scale = self.k
        norm_alpha = self.alpha / self.n
        for i in range(self.n):
            scale += norm_alpha * input_sqr[:, i:i+ch, :, :]
        scale = scale ** self.beta
        result = X / scale
        return result
Project: DeepDeepParser    Author: janmbuys
def mask_decoder_reduce(logit, thin_stack_head_next, logit_size, batch_size):
  """Ensures that we can only reduce when the stack has at least 1 item.

  For each batch entry k:
    If thin_stack_head_next == 0, #alternatively, or 1.
      let logit[k][reduce_index] = -np.inf, 
    else don't change.
  """
  # Allow reduce only if at least 1 item on stack, i.e., pointer >= 2.
  update_vals = tf.pack([-np.inf, -np.inf, 0.0])
  update_val = tf.gather(update_vals, 
      tf.minimum(thin_stack_head_next,
      2*tf.ones(tf.pack([batch_size]), dtype=tf.int32)))

  re_filled = tf.fill(tf.pack([batch_size]),
      tf.to_int64(data_utils.REDUCE_ID))
  re_inds = tf.transpose(tf.pack(
      [tf.to_int64(tf.range(batch_size)), re_filled]))
  re_delta = tf.SparseTensor(re_inds, update_val, tf.to_int64(
      tf.pack([batch_size, logit_size])))
  new_logit = logit + tf.sparse_tensor_to_dense(re_delta)
  return new_logit
Project: DeepDeepParser    Author: janmbuys
def gather_prev_stack_state_index(pointer_vals, prev_index, transition_state,
                                  batch_size):
  """Gathers new previous state index."""
  new_pointer_vals = tf.reshape(pointer_vals, [-1, 1])

  # Helper tensors.
  prev_vals = tf.reshape(tf.fill(
      tf.pack([batch_size]), prev_index), [-1, 1])
  trans_inds = tf.transpose(tf.pack(
      [tf.range(batch_size), transition_state]))

  # Gather new prev state for main tf.nn. Pointer vals if reduce, else prev.
  # State inds dimension [batch_size, NUM_TR_STATES]
  state_inds = tf.concat(1, [prev_vals]*6 + [new_pointer_vals, prev_vals])
  prev_state_index = tf.gather_nd(state_inds, trans_inds)
  return prev_state_index
Project: DeepDeepParser    Author: janmbuys
def gather_prev_stack_aux_state_index(pointer_vals, prev_index, transition_state, 
                                      batch_size):
  """Gather new prev state index for aux rnn: as for main, but zero if shift."""
  new_pointer_vals = tf.reshape(pointer_vals, [-1, 1])

  # Helper tensors.
  prev_vals = tf.reshape(tf.fill(
      tf.pack([batch_size]), prev_index), [-1, 1])
  trans_inds = tf.transpose(tf.pack(
      [tf.range(batch_size), transition_state]))
  batch_zeros = tf.reshape(tf.zeros(
            tf.pack([batch_size]), dtype=tf.int32), [-1, 1])

  # Gather new prev state for aux tf.nn.
  # State inds dimension [batch_size, NUM_TR_STATES]
  state_inds = tf.concat(1, 
      [prev_vals, batch_zeros] + [prev_vals]*4 + [new_pointer_vals, prev_vals])
  prev_state_index = tf.gather_nd(state_inds, trans_inds)
  return prev_state_index
Project: dcgan.tensorflow    Author: ilguyi
def GANLoss(logits, is_real=True, smoothing=0.9, name=None):
  """Computes standard GAN loss between `logits` and `labels`.

  Args:
    logits: A float32 Tensor of logits.
    is_real: boolean, True means `1` labeling, False means `0` labeling.
    smoothing: one side labels smoothing.

  Returns:
    A scalar Tensor representing the loss value.
  """
  if is_real:
    # one side label smoothing
    labels = tf.fill(logits.get_shape(), smoothing)
  else:
    labels = tf.zeros_like(logits)

  with ops.name_scope(name, 'GAN_loss', [logits, labels]) as name:
    loss = tf.reduce_mean(
              tf.nn.sigmoid_cross_entropy_with_logits(
                              labels=labels,
                              logits=logits))
    return loss
Project: tensorflow    Author: luyishisi
def testRandomPixelValueScale(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_)
Project: triplet-reid    Author: VisualComputingInstitute
def sample_k_fids_for_pid(pid, all_fids, all_pids, batch_k):
    """ Given a PID, select K FIDs of that specific PID. """
    possible_fids = tf.boolean_mask(all_fids, tf.equal(all_pids, pid))

    # The following simply uses a subset of K of the possible FIDs
    # if more than, or exactly K are available. Otherwise, we first
    # create a padded list of indices which contain a multiple of the
    # original FID count such that all of them will be sampled equally likely.
    count = tf.shape(possible_fids)[0]
    padded_count = tf.cast(tf.ceil(batch_k / count), tf.int32) * count
    full_range = tf.mod(tf.range(padded_count), count)

    # Sampling is always performed by shuffling and taking the first k.
    shuffled = tf.random_shuffle(full_range)
    selected_fids = tf.gather(possible_fids, shuffled[:batch_k])

    return selected_fids, tf.fill([batch_k], pid)
Project: tf-example-models    Author: aakhundov
def plot_fitted_data(points, c_means, c_variances):
    """Plots the data and given Gaussian components"""
    plt.plot(points[:, 0], points[:, 1], "b.", zorder=0)
    plt.plot(c_means[:, 0], c_means[:, 1], "r.", zorder=1)

    for i in range(c_means.shape[0]):
        std = np.sqrt(c_variances[i])
        plt.axes().add_artist(pat.Ellipse(
            c_means[i], 2 * std[0], 2 * std[1],
            fill=False, color="red", linewidth=2, zorder=1
        ))

    plt.show()


# PREPARING DATA

# generating DATA_POINTS points from a GMM with COMPONENTS components
Project: tf-example-models    Author: aakhundov
def initialize(self, dtype=tf.float64):
        if self.tf_mean is None:
            if self.mean is not None:
                self.tf_mean = tf.Variable(self.mean, dtype=dtype)
            else:
                self.tf_mean = tf.Variable(tf.cast(tf.fill([self.dims], 0.0), dtype))

        if self.tf_covariance is None:
            if self.covariance is not None:
                self.tf_covariance = self.covariance
            else:
                self.tf_covariance = FullCovariance(self.dims)

            self.tf_covariance.initialize(dtype)

        if self.tf_ln2piD is None:
            self.tf_ln2piD = tf.constant(np.log(2 * np.pi) * self.dims, dtype=dtype)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def constrain_logits(self, logits, curr_state):
        with tf.name_scope('constrain_logits'):
            allowed_tokens = tf.gather(tf.constant(self.allowed_token_matrix), curr_state)
            assert allowed_tokens.get_shape()[1:] == (self.output_size,)

            constrained_logits = tf.where(allowed_tokens, logits, tf.fill(tf.shape(allowed_tokens), -1e+10))
        return constrained_logits
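
The tf.where plus tf.fill combination above is a common way to mask out disallowed logits: positions that are not allowed receive a very large negative score so a later softmax effectively ignores them. The same masking step in isolation:

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
allowed = tf.constant([[True, False, True]])

# Disallowed positions get -1e10 instead of their real score.
masked = tf.where(allowed, logits, tf.fill(tf.shape(allowed), -1e+10))

with tf.Session() as sess:
    print(sess.run(masked))  # [[ 1.e+00 -1.e+10  3.e+00]]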
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def initialize(self):
        """Initialize the decoder.
        Args:
          name: Name scope for any created operations.
        Returns:
          `(finished, start_inputs, initial_state)`.
        """
        start_inputs = self._embedding_fn(self._tiled_start_tokens)
        print('start_inputs', start_inputs)
        finished = tf.zeros((self.batch_size, self._beam_width), dtype=tf.bool)

        self._initial_num_available_beams = tf.ones((self._batch_size,), dtype=tf.int32)
        self._full_num_available_beams = tf.fill((self._batch_size,), self._beam_width)

        with tf.name_scope('first_beam_mask'):
            self._first_beam_mask = self._make_beam_mask(self._initial_num_available_beams)
        with tf.name_scope('full_beam_mask'):
            self._full_beam_mask = self._make_beam_mask(self._full_num_available_beams)
        with tf.name_scope('minus_inifinity_scores'):
            self._minus_inifinity_scores = tf.fill((self.batch_size, self._beam_width, self._output_size), -1e+8)

        self._batch_size_range = tf.range(self.batch_size)
        initial_state = BeamSearchOptimizationDecoderState(
            cell_state=self._tiled_initial_cell_state,
            previous_logits=tf.zeros([self.batch_size, self._beam_width, self._output_size], dtype=tf.float32),
            previous_score=tf.zeros([self.batch_size, self._beam_width], dtype=tf.float32),
            # During the first time step we only consider the initial beam
            num_available_beams=self._initial_num_available_beams,
            gold_beam_id=tf.zeros([self.batch_size], dtype=tf.int32),
            finished=finished)

        return (finished, start_inputs, initial_state)
Project: youtube-8m    Author: wangheda
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: youtube-8m    Author: wangheda
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: seq2seq    Author: google
def _decode_infer(self, decoder, bridge, _encoder_output, features, labels):
    """Runs decoding in inference mode"""
    batch_size = self.batch_size(features, labels)
    if self.use_beam_search:
      batch_size = self.params["inference.beam_search.beam_width"]

    target_start_id = self.target_vocab_info.special_vocab.SEQUENCE_START
    helper_infer = tf_decode_helper.GreedyEmbeddingHelper(
        embedding=self.target_embedding,
        start_tokens=tf.fill([batch_size], target_start_id),
        end_token=self.target_vocab_info.special_vocab.SEQUENCE_END)
    decoder_initial_state = bridge()
    return decoder(decoder_initial_state, helper_infer)
Project: rlflow    Author: tpbarron
def stddev(x):
    x = tf.to_float(x)
    mean = tf.fill(x.get_shape(), tf.reduce_mean(x))
    return tf.sqrt(tf.reduce_mean(tf.square(tf.abs(tf.sub(x, mean)))))
Project: QA    Author: S-H-Y-GitHub
def getLoss(trueCosSim, falseCosSim, margin):
        zero = tf.fill(tf.shape(trueCosSim), 0.0)
        tfMargin = tf.fill(tf.shape(trueCosSim), margin)
        with tf.name_scope("loss"):
            losses = tf.maximum(zero, tf.subtract(tfMargin, tf.subtract(trueCosSim, falseCosSim)))
            loss = tf.reduce_sum(losses)
        return loss
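
A usage sketch for the margin loss above, with the result worked out by hand (the snippet's indentation suggests it is a method, so this assumes it is callable as a plain function, e.g. a @staticmethod):

import tensorflow as tf

true_sim = tf.constant([0.9, 0.2, 0.6])
false_sim = tf.constant([0.1, 0.4, 0.7])

# Per pair: max(0, margin - (true - false)) = [0.0, 0.7, 0.6]; sum = 1.3
loss = getLoss(true_sim, false_sim, margin=0.5)

with tf.Session() as sess:
    print(sess.run(loss))  # ~1.3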
Project: yt8m    Author: forwchen
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: DNC    Author: bgavran
def init_memory(self, batch_size):
        """
        Returns the memory state for step 0. Used in DNC for the argument to tf.while_loop

        :return: 
        """
        read_weightings = tf.fill([batch_size, self.memory_size, self.num_read_heads], Memory.epsilon)
        write_weighting = tf.fill([batch_size, self.memory_size], Memory.epsilon, name="Write_weighting")
        precedence_weighting = tf.zeros([batch_size, self.memory_size], name="Precedence_weighting")
        m = tf.fill([batch_size, self.memory_size, self.word_size], Memory.epsilon)  # initial memory matrix
        usage_vector = tf.zeros([batch_size, self.memory_size], name="Usage_vector")
        link_matrix = tf.zeros([batch_size, self.memory_size, self.memory_size])
        read_vectors = tf.fill([batch_size, self.num_read_heads, self.word_size], Memory.epsilon)

        return [read_weightings, write_weighting, usage_vector, precedence_weighting, m, link_matrix, read_vectors]
Project: seglink    Author: bgshih
def _cls_mining(self, scores, status, hard_neg_ratio=3.0, scope=None):
    """
    Positive classification loss and hard negative classification loss
    ARGS
      scores: [n, n_classes]
      status: int [n] node or link matching status
    RETURNS
      pos_loss: []
      n_pos: int []
      hard_neg_loss: []
      n_hard_neg: []
    """
    with tf.variable_scope(scope or 'cls_mining'):
      # positive classification loss
      pos_mask = tf.equal(status, MATCH_STATUS_POS)
      pos_scores = tf.boolean_mask(scores, pos_mask)
      n_pos = tf.shape(pos_scores)[0]
      pos_labels = tf.fill([n_pos], POS_LABEL)
      pos_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=pos_scores, labels=pos_labels))

      # hard negative classification loss
      neg_mask = tf.equal(status, MATCH_STATUS_NEG)
      neg_scores = tf.boolean_mask(scores, neg_mask)
      n_neg = tf.shape(neg_scores)[0]
      n_hard_neg = tf.cast(n_pos, tf.float32) * hard_neg_ratio
      n_hard_neg = tf.minimum(n_hard_neg, tf.cast(n_neg, tf.float32))
      n_hard_neg = tf.cast(n_hard_neg, tf.int32)
      neg_prob = tf.nn.softmax(neg_scores)[:, NEG_LABEL]
      # find the k examples with the least negative probabilities
      _, hard_neg_indices = tf.nn.top_k(-neg_prob, k=n_hard_neg)
      hard_neg_scores = tf.gather(neg_scores, hard_neg_indices)
      hard_neg_labels = tf.fill([n_hard_neg], NEG_LABEL)
      hard_neg_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=hard_neg_scores, labels=hard_neg_labels))

      return pos_loss, n_pos, hard_neg_loss, n_hard_neg
Project: jack    Author: uclmr
def apply_attention(attn_scores, states, length, is_self=False, with_sentinel=True, reuse=False):
    attn_scores += tf.expand_dims(misc.mask_for_lengths(length, tf.shape(attn_scores)[2]), 1)
    if is_self:
        # exclude attending to state itself
        attn_scores += tf.expand_dims(tf.diag(tf.fill([tf.shape(attn_scores)[1]], -1e6)), 0)
    if with_sentinel:
        with tf.variable_scope('sentinel', reuse=reuse):
            s = tf.get_variable('score', [1, 1, 1], tf.float32, tf.zeros_initializer())
        s = tf.tile(s, [tf.shape(attn_scores)[0], tf.shape(attn_scores)[1], 1])
        attn_probs = tf.nn.softmax(tf.concat([s, attn_scores], 2))
        attn_probs = attn_probs[:, :, 1:]
    else:
        attn_probs = tf.nn.softmax(attn_scores)
    attn_states = tf.einsum('abd,adc->abc', attn_probs, states)
    return attn_scores, attn_probs, attn_states
Project: PyTorchDemystified    Author: hhsecond
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
    # Take off the last column
    sliced = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # Append a column filled with <GO>
    decoder_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), sliced], 1)
    return decoder_input
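
A usage sketch for process_decoder_input above, with a toy vocabulary: the helper drops the last target column and prepends a column of <GO> ids, the standard teacher-forcing shift:

import tensorflow as tf

target_vocab_to_int = {'<GO>': 0, '<EOS>': 1, 'hello': 2, 'world': 3}
targets = tf.constant([[2, 3, 1],
                       [3, 2, 1]])

dec_input = process_decoder_input(targets, target_vocab_to_int, batch_size=2)

with tf.Session() as sess:
    print(sess.run(dec_input))  # [[0 2 3] [0 3 2]]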
Project: keras    Author: GeekLiB
def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = tf.pack([label_shape[0]])
    max_num_labels_tns = tf.pack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
                                                  max_num_labels_tns), tf.reverse(label_shape, [True])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(tf.reshape(tf.concat(0, [batch_ind, label_ind]), [2, -1]))

    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
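
A usage sketch for the conversion above, with hand-checked outputs. Note that the snippet targets the pre-1.0 TensorFlow API (tf.pack, boolean-list tf.reverse, axis-first tf.concat), so it runs as written only on an old TF build:

import tensorflow as tf

labels = tf.constant([[1, 2, 3, 0],
                      [4, 5, 0, 0]])  # zero-padded dense labels
label_lengths = tf.constant([3, 2])

sparse = ctc_label_dense_to_sparse(labels, label_lengths)

with tf.Session() as sess:
    st = sess.run(sparse)
    print(st.indices)  # [[0 0] [0 1] [0 2] [1 0] [1 1]]
    print(st.values)   # [1 2 3 4 5]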
Project: mlc2017-online    Author: machine-learning-challenge
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: mlc2017-online    Author: machine-learning-challenge
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: youtube-8m    Author: google
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: Video-Classification    Author: boyaolin
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: luminoth    Author: tryolabs
def testRandomPatchImageBboxes(self):
        """Tests the integrity of the return values of random_patch

        When bboxes is not None.
        """
        im_shape = (800, 600, 3)
        total_boxes = 5
        # We don't care about the label
        label = 3
        # First test case, we use randomly generated image and bboxes.
        image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
        # Add a label to each bbox.
        bboxes_w_label = tf.concat(
            [
                bboxes,
                tf.fill((bboxes.shape[0], 1), label)
            ],
            axis=1
        )
        config = self._random_patch_config
        ret_image, ret_bboxes = self._random_patch(
            image, config, bboxes_w_label
        )
        # Assertions
        self.assertLessEqual(ret_bboxes.shape[0], total_boxes)
        self.assertGreater(ret_bboxes.shape[0], 0)
        self.assertTrue(np.all(ret_bboxes >= 0))
        self.assertTrue(np.all(
            ret_bboxes[:, 0] <= ret_image.shape[1]
        ))
        self.assertTrue(np.all(
            ret_bboxes[:, 1] <= ret_image.shape[0]
        ))
        self.assertTrue(np.all(
            ret_bboxes[:, 2] <= ret_image.shape[1]
        ))
        self.assertTrue(np.all(
            ret_bboxes[:, 3] <= ret_image.shape[0]
        ))
        self.assertTrue(np.all(ret_image.shape <= im_shape))
Project: luminoth    Author: tryolabs
def testRandomPatchLargerThanImage(self):
        """Tests random_patch normalizes the minimum sizes.
        """
        im_shape = (600, 800, 3)
        total_boxes = 5
        config = EasyDict({
            'min_height': 900,
            'min_width': 900
        })
        label = 3
        image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
        # Add a label to each bbox.
        bboxes_w_label = tf.concat(
            [
                bboxes,
                tf.fill((bboxes.shape[0], 1), label)
            ],
            axis=1
        )
        ret_image, ret_bboxes = self._random_patch(
            image, config, bboxes_w_label
        )
        # Assertions
        self.assertLessEqual(ret_bboxes.shape[0], total_boxes)
        self.assertGreater(ret_bboxes.shape[0], 0)
        self.assertTrue(np.all(ret_bboxes >= 0))
        self.assertTrue(np.all(
            ret_bboxes[:, 0] <= ret_image.shape[1]
        ))
        self.assertTrue(np.all(
            ret_bboxes[:, 1] <= ret_image.shape[0]
        ))
        self.assertTrue(np.all(
            ret_bboxes[:, 2] <= ret_image.shape[1]
        ))
        self.assertTrue(np.all(
            ret_bboxes[:, 3] <= ret_image.shape[0]
        ))
        self.assertTrue(np.all(ret_image.shape <= im_shape))
Project: luminoth    Author: tryolabs
def testRandomResizeImageBboxes(self):
        """Tests the integrity of the return values of random_resize

        This tests the case when bboxes is not None.
        """
        im_shape = (600, 800, 3)
        config = self._random_resize_config
        total_boxes = 5
        label = 3

        image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
        # Add a label to each bbox.
        bboxes_w_label = tf.concat(
            [
                bboxes,
                tf.fill((bboxes.shape[0], 1), label)
            ],
            axis=1
        )
        ret_image, ret_bboxes = self._random_resize(
            image, config, bboxes_w_label
        )
        # Assertions
        self.assertEqual(ret_bboxes.shape[0], total_boxes)
        self.assertTrue(np.all(
            np.asarray(ret_image.shape[:2]) >= config.min_size
        ))
        self.assertTrue(np.all(
            np.asarray(ret_image.shape[:2]) <= config.max_size
        ))
Project: luminoth    Author: tryolabs
def testRandomDistort(self):
        """Tests the integrity of the return values of random_distortion.
        """
        im_shape = (600, 900, 3)
        config = self._random_distort_config
        total_boxes = 5
        label = 3

        image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
        # Add a label to each bbox.
        bboxes_w_label = tf.concat(
            [
                bboxes,
                tf.fill((bboxes.shape[0], 1), label)
            ],
            axis=1
        )

        ret_image, ret_bboxes = self._random_distort(
            image, config, bboxes_w_label
        )
        # Assertions
        self.assertEqual(im_shape, ret_image.shape)
        self.assertAllEqual(
            bboxes, ret_bboxes[:, :4]
        )
Project: luminoth    Author: tryolabs
def testSmallRandomDistort(self):
        """Tests random_distort with small-change arguments.

        We pass parameters to random_distort that make it so that it should
        change the image relatively little, and then check that in fact it
        changed relatively little.
        """
        total_boxes = 3
        im_shape = (600, 900, 3)
        config = EasyDict({
            'brightness': {
                'max_delta': 0.00001,
            },
            'hue': {
                'max_delta': 0.00001,
            },
            'saturation': {
                'lower': 0.99999,
                'upper': 1.00001,
            },
            'contrast': {
                'lower': 0.99999,
                'upper': 1.00001
            }
        })
        label = 3
        image, bboxes = self._get_image_with_boxes(im_shape, total_boxes)
        # Add a label to each bbox.
        bboxes_w_label = tf.concat(
            [
                bboxes,
                tf.fill((bboxes.shape[0], 1), label)
            ],
            axis=1
        )
        ret_image, ret_bboxes = self._random_distort(
            image, config, bboxes_w_label
        )
        # Assertions
        large_number = 0.1
        self.assertAllClose(image, ret_image, rtol=0.05, atol=large_number)
Project: ngraph    Author: NervanaSystems
def test_fill(self):
        # computation
        f = tf.fill([2, 3], 5)

        # test
        self.run(f)
Project: sonnet    Author: deepmind
def testInitialStateTuple(self, trainable, use_custom_initial_value,
                            state_size):
    batch_size = 6

    # Set the attribute on the class, since we can't set properties of
    # abstract classes.
    snt.RNNCore.state_size = state_size
    flat_state_size = nest.flatten(state_size)
    core = snt.RNNCore(name="dummy_core")
    if use_custom_initial_value:
      flat_initializer = [tf.constant_initializer(2)] * len(flat_state_size)
      trainable_initializers = nest.pack_sequence_as(
          structure=state_size, flat_sequence=flat_initializer)
    else:
      trainable_initializers = None
    initial_state = core.initial_state(
        batch_size, dtype=tf.float32, trainable=trainable,
        trainable_initializers=trainable_initializers)

    nest.assert_same_structure(initial_state, state_size)
    flat_initial_state = nest.flatten(initial_state)

    for state, size in zip(flat_initial_state, flat_state_size):
      self.assertEqual(state.get_shape(), [batch_size, size])

    with self.test_session() as sess:
      tf.global_variables_initializer().run()
      flat_initial_state_value = sess.run(flat_initial_state)
      for value, size in zip(flat_initial_state_value, flat_state_size):
        expected_initial_state = np.empty([batch_size, size])
        if not trainable:
          expected_initial_state.fill(0)
        elif use_custom_initial_value:
          expected_initial_state.fill(2)
        else:
          value_row = value[0]
          expected_initial_state = np.tile(value_row, (batch_size, 1))
        self.assertAllClose(value, expected_initial_state)
Project: conv_seq2seq    Author: tobyyouup
def _decode_infer(self, decoder, bridge, _encoder_output, features, labels):
    """Runs decoding in inference mode"""
    batch_size = self.batch_size(features, labels)
    if self.use_beam_search:
      batch_size = self.params["inference.beam_search.beam_width"]

    target_start_id = self.target_vocab_info.special_vocab.SEQUENCE_START
    helper_infer = tf_decode_helper.GreedyEmbeddingHelper(
        embedding=self.target_embedding,
        start_tokens=tf.fill([batch_size], target_start_id),
        end_token=self.target_vocab_info.special_vocab.SEQUENCE_END)
    decoder_initial_state = bridge()
    return decoder(decoder_initial_state, helper_infer)
Project: Youtube-8M-WILLOW    Author: antoine77340
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))

  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])

  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)

  resized = tf.concat([
      tf.slice(tensor, tf.zeros_like(shape), shape),
      tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)

  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
Project: transform    Author: tensorflow
def scale_by_min_max(x, output_min=0.0, output_max=1.0, name=None):
  """Scale a numerical column into the range [output_min, output_max].

  Args:
    x: A numeric `Tensor`.
    output_min: The minimum of the range of output values.
    output_max: The maximum of the range of output values.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the input column scaled to [output_min, output_max].

  Raises:
    ValueError: If output_min, output_max have the wrong order.
  """
  with tf.name_scope(name, 'scale_by_min_max'):
    if output_min >= output_max:
      raise ValueError('output_min must be less than output_max')

    x = tf.to_float(x)
    min_x_value = analyzers.min(x)
    max_x_value = analyzers.max(x)

    x_shape = tf.shape(x)

    # If min==max, the result will be the mean of the requested range.
    # Note that both branches of tf.where are computed, which means that this
    # will compute unused NaNs.
    scaled_result = tf.where(
        tf.fill(x_shape, min_x_value < max_x_value),
        (x - min_x_value) / (max_x_value - min_x_value), tf.fill(x_shape, 0.5))

    return (scaled_result * (output_max - output_min)) + output_min
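
The tf.fill(x_shape, min_x_value < max_x_value) call above broadcasts a scalar boolean to the shape of x, because this old-style tf.where expects its condition to match the shape of its branches. The same pattern in isolation, with a hard-coded stand-in for the min/max comparison:

import tensorflow as tf

x = tf.constant([10.0, 20.0, 40.0])
use_formula = tf.constant(True)  # stand-in for min_x_value < max_x_value

result = tf.where(tf.fill(tf.shape(x), use_formula),
                  (x - 10.0) / 30.0,          # the scaling branch
                  tf.fill(tf.shape(x), 0.5))  # the degenerate min==max branch

with tf.Session() as sess:
    print(sess.run(result))  # [0.         0.33333334 1.        ]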