Python tensorflow module, zeros() code examples

We extracted the following code examples from open-source Python projects to illustrate how to use tensorflow.zeros().

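Before the project examples, here is a minimal sketch of the call itself, assuming the TensorFlow 1.x graph-and-session style used by the snippets below. tf.zeros accepts a static shape list or a dynamic shape tensor, plus an optional dtype (float32 by default).

import tensorflow as tf

# Static shape: a 2x3 float32 tensor of zeros.
a = tf.zeros([2, 3])
# Explicit dtype: integer zeros.
b = tf.zeros([4], dtype=tf.int32)
# Dynamic shape: match another tensor whose shape is only known at run time.
x = tf.placeholder(tf.float32, shape=[None, 5])
c = tf.zeros(tf.shape(x))

with tf.Session() as sess:
    print(sess.run(a))  # [[0. 0. 0.] [0. 0. 0.]]
    print(sess.run(c, {x: [[1., 2., 3., 4., 5.]]}))  # [[0. 0. 0. 0. 0.]]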
Project: youtube-8m    Author: wangheda    | project source | file source
def calculate_loss_distill_relabel(self, predictions, labels_distill, labels, **unused_params):
    with tf.name_scope("loss_distill_relabel"):
      print("loss_distill_relabel")
      epsilon = 10e-6
      float_labels = tf.cast(labels, tf.float32)
      sum_labels = tf.cast(tf.reduce_sum(float_labels),dtype=tf.int32)
      pos_distill, _ = tf.nn.top_k(tf.reshape(labels_distill,[-1]), k=sum_labels)
      labels_true = tf.ones(tf.shape(labels))
      labels_false = tf.zeros(tf.shape(labels))
      labels_add = tf.where(tf.greater_equal(labels_distill, pos_distill[-1]), labels_true, labels_false)
      print(labels_add.get_shape().as_list())
      float_labels = float_labels+labels_add*(1.0-float_labels)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)

      return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
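
The relabeling above builds a {0, 1} mask by comparing distillation scores against a threshold and selecting between all-ones and all-zeros templates. A standalone sketch of that tf.where pattern (values hypothetical):

# Select 1.0 where the score clears the threshold, else 0.0.
scores = tf.constant([0.9, 0.2, 0.7])
ones = tf.ones(tf.shape(scores))
zeros = tf.zeros(tf.shape(scores))
mask = tf.where(tf.greater_equal(scores, 0.5), ones, zeros)  # evaluates to [1., 0., 1.]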
Project: tensorflow_qrnn    Author: icoxfog417    | project source | file source
def forward(self, x):
        length = lambda mx: int(mx.get_shape()[0])

        with tf.variable_scope("QRNN/Forward"):
            if self.c is None:
                # init context cell
                self.c = tf.zeros([length(x), self.kernel.size], dtype=tf.float32)

            if self.conv_size <= 2:
                # x is batch_size x sentence_length x word_length
                # -> now, transpose it to sentence_length x batch_size x word_length
                _x = tf.transpose(x, [1, 0, 2])

                for i in range(length(_x)):
                    t = _x[i] # t is batch_size x word_length matrix
                    f, z, o = self.kernel.forward(t)
                    self._step(f, z, o)
            else:
                c_f, c_z, c_o = self.kernel.conv(x)
                for i in range(length(c_f)):
                    f, z, o = c_f[i], c_z[i], c_o[i]
                    self._step(f, z, o)

        return self.h
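
The snippet initializes the context cell self.c with tf.zeros but does not show self._step. In a QRNN the f/z/o gates are typically combined by fo-pooling; a hedged sketch of what such a step could look like (an assumption based on the QRNN formulation, not this project's actual code):

def _step(self, f, z, o):
    # Hypothetical fo-pooling update (assumption; the project's _step is not shown):
    # the forget gate f mixes the previous context with the candidate z,
    # and the output gate o produces the hidden state.
    self.c = f * self.c + (1.0 - f) * z
    self.h = o * self.c
    return self.h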
Project: seq2seq    Author: google    | project source | file source
def create_initial_beam_state(config):
  """Creates an instance of `BeamState` that can be used on the first
  call to `beam_step`.

  Args:
    config: A BeamSearchConfig

  Returns:
    An instance of `BeamState`.
  """
  return BeamSearchState(
      log_probs=tf.zeros([config.beam_width]),
      finished=tf.zeros(
          [config.beam_width], dtype=tf.bool),
      lengths=tf.zeros(
          [config.beam_width], dtype=tf.int32))
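
A hedged usage sketch; the namedtuple below is a stand-in, since the real BeamSearchConfig may carry more fields:

from collections import namedtuple

BeamSearchConfig = namedtuple('BeamSearchConfig', ['beam_width'])  # hypothetical stand-in

state = create_initial_beam_state(BeamSearchConfig(beam_width=5))
# state.log_probs: float32 [5], state.finished: bool [5], state.lengths: int32 [5]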
Project: sea-lion-counter    Author: rdinse    | project source | file source
def generateCountMaps(self, coords):
    '''Generates a count map for the provided list of coordinates.
    '''
    s = self.config['projective_field_size']
    target_size = 3 + self.config['output_size'] + 2 * s 
    count_maps = np.zeros((self.config['cls_nb'], target_size, target_size), dtype=np.int16)

    shift = - self.config['contextual_pad']
    size = self.config['tile_size']
    for coord in coords:
      y = coord[1] + shift
      x = coord[2] + shift
      if y >= 0 and y < size and \
         x >= 0 and x < size:

        self.inc_region(count_maps[coord[0]], *self.target_sizes[y, x])

    return np.moveaxis(count_maps, 0, -1).astype(np.float32)
Project: a-nice-mc    Author: ermongroup    | project source | file source
def __call__(self, inputs, steps):
        def fn(zv, x):
            """
            Transition for training, without Metropolis-Hastings.
            `z` is the input state.
            `v` is created as a dummy variable to allow output of v_, for training p(v).
            :param x: variable only for specifying the number of steps
            :return: next state `z_`, and the corresponding auxiliary variable `v_`.
            """
            z, v = zv
            v = tf.random_normal(shape=tf.stack([tf.shape(z)[0], self.network.v_dim]))
            z_, v_ = self.network.forward([z, v])
            return z_, v_

        elems = tf.zeros([steps])
        return tf.scan(fn, elems, inputs, back_prop=True)
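
Here elems = tf.zeros([steps]) is only a dummy of length steps: tf.scan ignores its values and simply applies fn that many times, threading (z, v) through as the accumulator. A standalone sketch of the pattern:

# tf.scan as a fixed-step loop: elems only determines the iteration count.
elems = tf.zeros([5])
counter = tf.scan(lambda acc, _: acc + 1.0, elems, initializer=tf.constant(0.0))
# counter evaluates to [1., 2., 3., 4., 5.]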
Project: yoctol-keras-layer-zoo    Author: Yoctol    | project source | file source
def call(self, inputs, mask=None, initial_state=None, training=None):
        inputs_shape = K.shape(inputs)
        zeros = tf.zeros(
            shape=[
                inputs_shape[0],
                inputs_shape[1] - 1,
                self.layer.units
            ]
        )
        outputs = self.layer.call(
            inputs=inputs,
            mask=mask,
            initial_state=initial_state,
            training=training
        )
        outputs = K.reshape(
            tf.slice(outputs, [0, inputs_shape[1] - 1, 0], [-1, 1, -1]),
            shape=(inputs_shape[0], 1, self.layer.units)
        )
        outputs = K.concatenate([outputs, zeros], axis=1)

        if 0 < self.layer.dropout + self.layer.recurrent_dropout:
            outputs._uses_learning_phase = True
        return outputs
Project: comprehend    Author: Fenugreek    | project source | file source
def init_params(self, trainable=True, **kwargs):

        i_shape, k_shape = self.shapes

        # Compute effective number of neurons per filter. Ignores padding.
        conv_out = i_shape[0] * i_shape[1]
        if hasattr(self, 'pool_side'): conv_out //= self.pool_side**2
        elif hasattr(self, 'pool_width'): conv_out //= self.pool_width

        self.params['W'] = xavier_init(self.n_visible, self.n_hidden * conv_out,
                                       shape=k_shape + [self.n_hidden],
                                       name='W', trainable=trainable, dtype=self.dtype)
        self.params['bhid'] = tf.Variable(tf.zeros(self.n_hidden, dtype=self.dtype),
                                          name='bhid', trainable=trainable)
        self.params['bvis'] = tf.Variable(tf.zeros(i_shape, dtype=self.dtype),
                                          name='bvis', trainable=trainable)
Project: comprehend    Author: Fenugreek    | project source | file source
def __init__(self, pool_side=2, **kwargs):
        """
        pool_side:
        Do max pooling on pool_side x pool_side non-overlapping
        patches of input.
        """

        Conv.__init__(self, **kwargs)

        if not kwargs.get('fromfile'):
            self.pool_side = pool_side
            self.shapes.append([])

        # Pool shape
        input_size = self.shapes[0] if self.padding == 'SAME' else \
                     [self.shapes[0][i] - self.shapes[1][i] + 1 for i in range(2)]
        self.shapes[2] = [self.batch_size] + \
                         [input_size[i] // self.strides[i+1] //
                          self.pool_side for i in range(2)] + \
                          [self.pool_side**2, self.n_hidden]
        self.zeros = tf.zeros(self.shapes[2], dtype=self.dtype)
        self.state = {}
Project: comprehend    Author: Fenugreek    | project source | file source
def _random_overlay(self, static_hidden=False):
        """Construct random max pool locations."""

        s = self.shapes[2]

        if static_hidden:
            args = np.random.randint(s[2], size=np.prod(s) // s[2] // s[4])
            overlay = np.zeros(np.prod(s) // s[4], np.bool)
            overlay[args + np.arange(len(args)) * s[2]] = True
            overlay = overlay.reshape([s[0], s[1], s[3], s[2]])
            overlay = np.rollaxis(overlay, -1, 2)
            return arrays.extend(overlay, s[4])
        else:
            args = np.random.randint(s[2], size=np.prod(s) // s[2])
            overlay = np.zeros(np.prod(s), np.bool)
            overlay[args + np.arange(len(args)) * s[2]] = True
            overlay = overlay.reshape([s[0], s[1], s[3], s[4], s[2]])
            return np.rollaxis(overlay, -1, 2)
Project: text_classification    Author: brightmart    | project source | file source
def answer_module(self):
        """ Answer Module:generate an answer from the final memory vector.
        Input:
            hidden state from episodic memory module:[batch_size,hidden_size]
            question:[batch_size, embedding_size]
        """
        steps=self.sequence_length if self.decode_with_sequences else 1  # decode a sequence of tokens, e.g. "x1 x2 x3 x4 ..."
        a=self.m_T #init hidden state
        y_pred=tf.zeros((self.batch_size,self.hidden_size))  # TODO: usually initialized with the embedding of a special '<GO>' token, which can be passed in from outside.
        logits_list=[]
        logits_return=None
        for i in range(steps):
            cell = rnn.GRUCell(self.hidden_size)
            y_previous_q=tf.concat([y_pred,self.query_embedding],axis=1)  # [batch_size, hidden_size+embedding_size]
            _, a = cell( y_previous_q,a)
            logits=tf.layers.dense(a,units=self.num_classes)  # [batch_size, num_classes]
            logits_list.append(logits)
        if self.decode_with_sequences:#need to get sequences.
            logits_return = tf.stack(logits_list, axis=1)  # [batch_size,sequence_length,num_classes]
        else:#only need to get an answer, not sequences
            logits_return = logits_list[0]  # [batch_size, num_classes]

        return logits_return
Project: text_classification    Author: brightmart    | project source | file source
def test():
    # below is a functional test; to use this for text classification, first transform sentences into vocabulary indices, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length))  # [batch_size, sequence_length]
            input_y=np.array([1,0,1,1,1,2,1,1])  # [batch_size]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
Project: tfplus    Author: renmengye    | project source | file source
def init_var(self):
        self.rand_h = tf.random_uniform([1], 1.0 - float(self.rnd_hflip), 1.0)
        self.rand_v = tf.random_uniform([1], 1.0 - float(self.rnd_vflip), 1.0)
        self.rand_t = tf.random_uniform(
            [1], 1.0 - float(self.rnd_transpose), 1.0)
        self.offset = tf.random_uniform(
            [2], dtype='int32', maxval=self.padding * 2 + self.shrink)
        if self._debug:
            self.offset = tf.Print(self.offset,
                                   ['Forward RND module', self.offset])
        if self.rnd_size:
            self.space = 2 * self.padding - self.offset
            self.offset20 = tf.random_uniform(
                [], dtype='int32', maxval=self.space[0] * 2) - self.space[0]
            self.offset21 = tf.random_uniform(
                [], dtype='int32', maxval=self.space[1] * 2) - self.space[1]
            self.offset2 = tf.pack([self.offset20, self.offset21])
        else:
            self.offset2 = tf.zeros([2], dtype='int32')
        pass
Project: querysum    Author: helmertz    | project source | file source
def _custom_one_step_rnn_loop_fn(self, initial_input_word_embedding, initial_cell_state):
        def loop_fn(time, cell_output, cell_state, loop_state):
            if cell_output is None:  # time == 0
                next_cell_state = initial_cell_state
                context_vector, attention_logits = self._attention(next_cell_state, initial_input_word_embedding)
                pointer_probabilities = self._pointer_probabilities(context_vector, next_cell_state,
                                                                    initial_input_word_embedding)
                next_input = tf.concat(
                    [initial_input_word_embedding, context_vector, self.pre_computed_query_state_placeholder], axis=1)
                next_loop_state = (context_vector, attention_logits, pointer_probabilities)
            else:
                next_cell_state = cell_state
                next_input = tf.zeros(shape=[self.batch_size,
                                             self.word_embedding_dim +
                                             self.encoder_output_size +
                                             self.encoder_cell_state_size
                                             ])
                next_loop_state = loop_state

            elements_finished = cell_output is not None

            emit_output = cell_output
            return elements_finished, next_input, next_cell_state, emit_output, next_loop_state

        return loop_fn
Project: tf-image-interpreter    Author: ThoughtWorksInc    | project source | file source
def _generate_labels(self, overlaps):
    labels = tf.Variable(tf.ones(shape=(tf.shape(overlaps)[0],), dtype=tf.float32) * -1, trainable=False,
                         validate_shape=False)
    gt_max_overlaps = tf.arg_max(overlaps, dimension=0)
    anchor_max_overlaps = tf.arg_max(overlaps, dimension=1)
    mask = tf.one_hot(anchor_max_overlaps, tf.shape(overlaps)[1], on_value=True, off_value=False)
    max_overlaps = tf.boolean_mask(overlaps, mask)
    if self._debug:
      max_overlaps = tf.Print(max_overlaps, [max_overlaps])
    labels = tf.scatter_update(labels, gt_max_overlaps, tf.ones((tf.shape(gt_max_overlaps)[0],)))
    # TODO: extract config object
    over_threshold_mask = tf.reshape(tf.where(max_overlaps > 0.5), (-1,))
    if self._debug:
      over_threshold_mask = tf.Print(over_threshold_mask, [over_threshold_mask], message='over threshold index : ')
    labels = tf.scatter_update(labels, over_threshold_mask, tf.ones((tf.shape(over_threshold_mask)[0],)))
    # TODO: support clobber positive in the origin implement
    below_threshold_mask = tf.reshape(tf.where(max_overlaps < 0.3), (-1,))
    if self._debug:
      below_threshold_mask = tf.Print(below_threshold_mask, [below_threshold_mask], message='below threshold index : ')
    labels = tf.scatter_update(labels, below_threshold_mask, tf.zeros((tf.shape(below_threshold_mask)[0],)))
    return labels
Project: photinia    Author: XoriieInpottn    | project source | file source
def _build(self):
        """Build the linear layer.
        Two parameters: weight and bias.

        :return: None.
        """
        bound = math.sqrt(6.0 / (self._input_size + self._output_size))
        w_init = tf.random_uniform(
            minval=-bound,
            maxval=bound,
            shape=(self._input_size, self._output_size),
            dtype=D_TYPE,
            name='w_init'
        )
        self._w = tf.Variable(w_init, dtype=D_TYPE, name='w')
        if self._with_bias:
            b_init = tf.zeros(
                shape=(self._output_size,),
                dtype=D_TYPE,
                name='b_init'
            )
            self._b = tf.Variable(b_init, dtype=D_TYPE, name='b')
        else:
            self._b = None
        self._batch_norm = BatchNorm('bn', self._output_size) if self._with_batch_norm else None
Project: photinia    Author: XoriieInpottn    | project source | file source
def _build(self):
        w_init = tf.random_normal(
            stddev=0.01,
            shape=(
                self._filter_height,
                self._filter_width,
                self._input_depth,
                self._output_depth
            ),
            dtype=D_TYPE,
            name='w_init'
        )
        b_init = tf.zeros(
            shape=(self._output_depth,),
            dtype=D_TYPE,
            name='b_init'
        )
        self._w = tf.Variable(w_init, dtype=D_TYPE, name='w')
        self._b = tf.Variable(b_init, dtype=D_TYPE, name='b')
Project: photinia    Author: XoriieInpottn    | project source | file source
def _build(self):
        beta_init = tf.zeros(
            shape=self._size,
            dtype=D_TYPE
        )
        gamma_init = tf.ones(
            shape=self._size,
            dtype=D_TYPE
        )
        self._beta = tf.Variable(
            name='beta',
            initial_value=beta_init,
            dtype=D_TYPE
        )
        self._gamma = tf.Variable(
            name='gamma',
            initial_value=gamma_init,
            dtype=D_TYPE
        )
Project: cleverhans    Author: tensorflow    | project source | file source
def set_input_shape(self, input_shape):
        batch_size, rows, cols, input_channels = input_shape
        kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                                   self.output_channels)
        assert len(kernel_shape) == 4
        assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
        init = tf.random_normal(kernel_shape, dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                                   axis=(0, 1, 2)))
        self.kernels = tf.Variable(init)
        self.b = tf.Variable(
            np.zeros((self.output_channels,)).astype('float32'))
        input_shape = list(input_shape)
        input_shape[0] = 1
        dummy_batch = tf.zeros(input_shape)
        dummy_output = self.fprop(dummy_batch)
        output_shape = [int(e) for e in dummy_output.get_shape()]
        output_shape[0] = 1
        self.output_shape = tuple(output_shape)
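
The dummy_batch trick infers the layer's output shape at graph-construction time by pushing a single zero example through fprop. A minimal sketch of the same idea with a plain convolution (kernel and sizes hypothetical):

kernel = tf.random_normal([3, 3, 3, 8])  # hypothetical 3x3 conv, 3 -> 8 channels
dummy = tf.zeros([1, 32, 32, 3])         # one all-zero example
dummy_out = tf.nn.conv2d(dummy, kernel, strides=[1, 2, 2, 1], padding='SAME')
output_shape = [int(e) for e in dummy_out.get_shape()]  # [1, 16, 16, 8]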
Project: fold    Author: tensorflow    | project source | file source
def test_record_doc_example(self):
    # Test to make sure examples from the documentation compile.
    example_datum = {'id': 8,
                     'name': 'Joe Smith',
                     'location': (2.5, 7.0)}
    num_ids = 16
    embed_len = 16
    td = tdb
    char_rnn = (td.InputTransform(lambda s: [ord(c) for c in s]) >>
                td.Map(td.Scalar('int32') >>
                       td.Function(tdl.Embedding(128, 16))) >>
                td.Fold(td.Concat() >> td.Function(tdl.FC(32)),
                        td.FromTensor(tf.zeros(32))))
    r = (td.Record([('id', (td.Scalar('int32') >>
                            td.Function(tdl.Embedding(num_ids, embed_len)))),
                    ('name', char_rnn),
                    ('location', td.Vector(2))])
         >> td.Concat() >> td.Function(tdl.FC(256)))
    with self.test_session():
      r.eval(example_datum)
Project: fold    Author: tensorflow    | project source | file source
def rnn_block(input_block, state_length):
  """Get a fully connected RNN block.

  The input is concatenated with the state vector and put through a fully
  connected layer to get the next state vector.

  Args:
    input_block: Put each input through this before concatenating it with the
      current state vector.
    state_length: Length of the RNN state vector.

  Returns:
    RNN Block (seq of input_block inputs -> output state)
  """
  combine_block = ((td.Identity(), input_block) >> td.Concat()
                   >> td.Function(td.FC(state_length)))
  return td.Fold(combine_block, tf.zeros(state_length))


# All characters are lowercase, so subtract 'a' to make them 0-indexed.
Project: WassersteinGAN.tensorflow    Author: shekkizh    | project source | file source
def _discriminator(self, input_images, dims, train_phase, activation=tf.nn.relu, scope_name="discriminator",
                       scope_reuse=False):
        N = len(dims)
        with tf.variable_scope(scope_name) as scope:
            if scope_reuse:
                scope.reuse_variables()
            h = input_images
            skip_bn = True  # First layer of discriminator skips batch norm
            for index in range(N - 2):
                W = utils.weight_variable([4, 4, dims[index], dims[index + 1]], name="W_%d" % index)
                b = tf.zeros([dims[index+1]])
                h_conv = utils.conv2d_strided(h, W, b)
                if skip_bn:
                    h_bn = h_conv
                    skip_bn = False
                else:
                    h_bn = utils.batch_norm(h_conv, dims[index + 1], train_phase, scope="disc_bn%d" % index)
                h = activation(h_bn, name="h_%d" % index)
                utils.add_activation_summary(h)

            W_pred = utils.weight_variable([4, 4, dims[-2], dims[-1]], name="W_pred")
            b = tf.zeros([dims[-1]])
            h_pred = utils.conv2d_strided(h, W_pred, b)
        return None, h_pred, None  # Return the last convolution output. None values keep the interface consistent with other GAN discriminators.
Project: mnist_LeNet    Author: LuxxxLucy    | project source | file source
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999):
    ''' Adam optimizer '''
    updates = []
    if type(cost_or_grads) is not list:
        grads = tf.gradients(cost_or_grads, params)
    else:
        grads = cost_or_grads
    # Pass name= explicitly: tf.Variable's second positional argument is `trainable`.
    t = tf.Variable(1., name='adam_t')
    for p, g in zip(params, grads):
        mg = tf.Variable(tf.zeros(p.get_shape()), name=p.name.split(':')[0] + '_adam_mg')
        if mom1 > 0:
            v = tf.Variable(tf.zeros(p.get_shape()), name=p.name.split(':')[0] + '_adam_v')
            v_t = mom1 * v + (1. - mom1) * g
            v_hat = v_t / (1. - tf.pow(mom1, t))
            updates.append(v.assign(v_t))
        else:
            v_hat = g
        mg_t = mom2 * mg + (1. - mom2) * tf.square(g)
        mg_hat = mg_t / (1. - tf.pow(mom2, t))
        g_t = v_hat / tf.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append(mg.assign(mg_t))
        updates.append(p.assign(p_t))
    updates.append(t.assign_add(1))
    return tf.group(*updates)
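
A hedged usage sketch (the loss tensor is hypothetical): the function returns a grouped op that applies one Adam step and increments the counter t.

params = tf.trainable_variables()
train_op = adam_updates(params, loss, lr=3e-4)  # loss: a hypothetical scalar tensor
# sess.run(train_op) applies one update to every parameter.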
Project: qrn    Author: uwnlp    | project source | file source
def __call__(self, u_t, a, b, scope=None):
        """

        :param u_t: [N, M, d]
        :param a: [N, M, 1]
        :param b: [N, M, 1]
        :return:
        """
        N, M, d = self.batch_size, self.mem_size, self.hidden_size
        L, sL = self.L, self.sL
        with tf.name_scope(scope or self.__class__.__name__):
            L = tf.tile(tf.expand_dims(L, 0), [N, 1, 1])
            sL = tf.tile(tf.expand_dims(sL, 0), [N, 1, 1])
            logb = tf.log(b + 1e-9)
            logb = tf.concat(1, [tf.zeros([N, 1, 1]), tf.slice(logb, [0, 1, 0], [-1, -1, -1])])
            left = L * tf.exp(tf.batch_matmul(L, logb * sL))  # [N, M, M]
            right = a * u_t  # [N, M, d]
            u = tf.batch_matmul(left, right)  # [N, M, d]
        return u
Project: qrn    Author: uwnlp    | project source | file source
def __call__(self, u_t, a, b, scope=None):
        """

        :param u_t: [N, M, d]
        :param a: [N, M, d]
        :param b: [N, M, d]
        :return:
        """
        N, M, d = self.batch_size, self.mem_size, self.hidden_size
        L, sL = self.L, self.sL
        with tf.name_scope(scope or self.__class__.__name__):
            L = tf.tile(tf.expand_dims(tf.expand_dims(L, 0), 0), [N, d, 1, 1])
            sL = tf.tile(tf.expand_dims(tf.expand_dims(sL, 0), 0), [N, d, 1, 1])
            logb = tf.log(b + 1e-9)  # [N, M, d]
            logb = tf.concat(1, [tf.zeros([N, 1, d]), tf.slice(logb, [0, 1, 0], [-1, -1, -1])])  # [N, M, d]
            logb = tf.expand_dims(tf.transpose(logb, [0, 2, 1]), -1)  # [N, d, M, 1]
            left = L * tf.exp(tf.batch_matmul(L, logb * sL))  # [N, d, M, M]
            right = a * u_t  # [N, M, d]
            right = tf.expand_dims(tf.transpose(right, [0, 2, 1]), -1)  # [N, d, M, 1]
            u = tf.batch_matmul(left, right)  # [N, d, M, 1]
            u = tf.transpose(tf.squeeze(u, [3]), [0, 2, 1])  # [N, M, d]
        return u
Project: uai2017_learning_to_acquire_information    Author: evanthebouncy    | project source | file source
def get_feed_dic_obs(self, obs):
    # create all the necessary feed values
    obs_x = []
    obs_y = []
    obs_tf = []

    for _ in range(OBS_SIZE):
      obs_x.append(np.zeros([N_BATCH,L]))
      obs_y.append(np.zeros([N_BATCH,L]))
      obs_tf.append(np.zeros([N_BATCH,2]))

    num_obs = len(obs)
    for ob_idx in range(num_obs):
      ob_coord, ob_lab = obs[ob_idx]
      ob_x, ob_y = vectorize(ob_coord)
      obs_x[ob_idx] = np.tile(ob_x, [50,1])
      obs_y[ob_idx] = np.tile(ob_y, [50,1])
      obs_tf[ob_idx] = np.tile(ob_lab, [50,1])

    feed_dic = dict(zip(self.ph_obs_x + self.ph_obs_y + self.ph_obs_tf, 
                        obs_x + obs_y + obs_tf))
    return feed_dic
Project: keras    Author: GeekLiB    | project source | file source
def spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):
    '''Pads the 2nd and 3rd dimensions of a 4D tensor
    with "padding[0]" and "padding[1]" (respectively) zeros on both sides.
    '''
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'th':
        pattern = [[0, 0], [0, 0],
                   [padding[0], padding[0]], [padding[1], padding[1]]]
    else:
        pattern = [[0, 0],
                   [padding[0], padding[0]], [padding[1], padding[1]],
                   [0, 0]]
    return tf.pad(x, pattern)
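
A quick shape check, assuming the 'tf' (channels-last) ordering:

x = tf.zeros([2, 28, 28, 3])
y = spatial_2d_padding(x, padding=(1, 1), dim_ordering='tf')
# y has shape [2, 30, 30, 3]: one zero row/column added on each side.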
Project: HyperGAN    Author: 255BITS    | project source | file source
def create(self):
        config = self.config

        d2 = dict(config.discriminator)
        d2['class'] = self.ops.lookup("class:hypergan.discriminators.pyramid_discriminator.PyramidDiscriminator")
        self.encoder = self.create_component(d2)
        self.encoder.ops.describe("encoder")
        self.encoder.create(self.inputs.x)
        self.encoder.z = tf.zeros(0)
        self.trainer = self.create_component(config.trainer)

        StandardGAN.create(self)
        cycloss = tf.reduce_mean(tf.abs(self.inputs.x-self.generator.sample))
        cycloss_lambda = config.cycloss_lambda or 10
        self.loss.sample[1] *= config.g_lambda or 1
        self.loss.sample[1] += cycloss*cycloss_lambda
        self.trainer.create()

        self.session.run(tf.global_variables_initializer())
Project: HyperGAN    Author: 255BITS    | project source | file source
def test_relation_layer(self):
        component = GANComponent(gan=gan, config={'test':True})
        with self.test_session():
            constant = tf.zeros([1, 2, 2, 1])
            split = component.split_by_width_height(constant)
            self.assertEqual(len(split), 4)
            permute = component.permute(split, 2)
            self.assertEqual(len(permute), 12)
            rel_layer = component.relation_layer(constant)
            self.assertEqual(gan.ops.shape(rel_layer), [1,2,2,1])

            constant = tf.zeros([1, 4, 4, 1])
            split = component.split_by_width_height(constant)
            self.assertEqual(len(split), 16)
            permute = component.permute(split, 2)
            self.assertEqual(len(permute), 240)
            rel_layer = component.relation_layer(constant)
            self.assertEqual(gan.ops.shape(rel_layer), [1,4,4,1])
Project: magenta    Author: tensorflow    | project source | file source
def sample(self, n, max_length=None, z=None, **kwargs):
    """Sample with an optional conditional embedding `z`."""
    if z is not None and z.shape[0].value != n:
      raise ValueError(
          '`z` must have a first dimension that equals `n` when given. '
          'Got: %d vs %d' % (z.shape[0].value, n))

    if self.hparams.conditional and z is None:
      tf.logging.warning(
          'Sampling from conditional model without `z`. Using random `z`.')
      normal_shape = [n, self.hparams.z_size]
      normal_dist = tf.contrib.distributions.Normal(
          loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))
      z = normal_dist.sample()

    return self.decoder.sample(n, max_length, z, **kwargs)
Project: magenta    Author: tensorflow    | project source | file source
def tf_ispecgram(spec,
                 n_fft=512,
                 hop_length=None,
                 mask=True,
                 pad=True,
                 log_mag=True,
                 re_im=False,
                 dphase=True,
                 mag_only=False,
                 num_iters=1000):
  dims = spec.get_shape().as_list()
  # Add back in nyquist frequency
  x = spec if not pad else tf.concat(
      [spec, tf.zeros([dims[0], 1, dims[2], dims[3]])], 1)
  audio = tf.py_func(batch_ispecgram, [
      x, n_fft, hop_length, mask, log_mag, re_im, dphase, mag_only, num_iters
  ], tf.float32)
  return audio


#---------------------------------------------------
# Summaries
#---------------------------------------------------
Project: cancer    Author: yancz1989    | project source | file source
def build_lstm_inner(H, lstm_input):
  '''
  build lstm decoder
  '''
  lstm_cell = rnn_cell.BasicLSTMCell(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)
  if H['num_lstm_layers'] > 1:
    lstm = rnn_cell.MultiRNNCell([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)
  else:
    lstm = lstm_cell

  batch_size = H['batch_size'] * H['grid_height'] * H['grid_width']
  state = tf.zeros([batch_size, lstm.state_size])

  outputs = []
  with tf.variable_scope('RNN', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
    for time_step in range(H['rnn_len']):
      if time_step > 0: tf.get_variable_scope().reuse_variables()
      output, state = lstm(lstm_input, state)
      outputs.append(output)
  return outputs
Project: NER-LSTM-CRF    Author: liu-nlper    | project source | file source
def map_item2id(items, voc, max_len, none_word=1, lower=False, init_value=0, allow_error=True):
    """
    Map word/pos items to an id sequence.
    Args:
        items: list, the items to map
        voc: vocabulary dict mapping item -> id
        max_len: int, maximum sequence length
        none_word: id assigned to items missing from voc, default is 1
        lower: bool, whether to lowercase items first
        init_value: default is 0, the value used for padding
    Returns:
        arr: np.array, dtype=int32, shape=[max_len,]
    """
    assert type(none_word) == int
    arr = np.zeros((max_len,), dtype='int32') + init_value
    min_range = min(max_len, len(items))
    for i in range(min_range):  # items beyond max_len are truncated
        item = items[i] if not lower else items[i].lower()
        if allow_error:
            arr[i] = voc[item] if item in voc else none_word
        else:
            arr[i] = voc[item]
    return arr
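
A hedged usage sketch with a toy vocabulary (hypothetical):

voc = {'the': 2, 'cat': 3, 'sat': 4}  # hypothetical vocabulary; id 1 is the OOV word
arr = map_item2id(['The', 'cat', 'flew'], voc, max_len=5, lower=True)
# arr == [2, 3, 1, 0, 0]: 'flew' falls back to none_word=1, padding keeps init_value=0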
Project: zhusuan    Author: thu-ml    | project source | file source
def vae(observed, n, n_x, n_z, n_k, tau, n_particles, relaxed=False):
    with zs.BayesianNet(observed=observed) as model:
        z_stacked_logits = tf.zeros([n, n_z, n_k])
        if relaxed:
            z = zs.ExpConcrete('z', tau, z_stacked_logits,
                               n_samples=n_particles, group_ndims=1)
            z = tf.exp(tf.reshape(z, [n_particles, n, n_z * n_k]))
        else:
            z = zs.OnehotCategorical(
                'z', z_stacked_logits, n_samples=n_particles, group_ndims=1,
                dtype=tf.float32)
            z = tf.reshape(z, [n_particles, n, n_z * n_k])
        lx_z = tf.layers.dense(z, 200, activation=tf.tanh)
        lx_z = tf.layers.dense(lx_z, 200, activation=tf.tanh)
        x_logits = tf.layers.dense(lx_z, n_x)
        x = zs.Bernoulli('x', x_logits, group_ndims=1)
    return model
Project: zhusuan    Author: thu-ml    | project source | file source
def test_Normal(self):
        with BayesianNet():
            mean = tf.zeros([2, 3])
            logstd = tf.zeros([2, 3])
            std = tf.exp(logstd)
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_ndims = tf.placeholder(tf.int32, shape=[])
            a = Normal('a', mean, logstd=logstd, n_samples=n_samples,
                       group_ndims=group_ndims)
            b = Normal('b', mean, std=std, n_samples=n_samples,
                       group_ndims=group_ndims)

        for st in [a, b]:
            sample_ops = set(get_backward_ops(st.tensor))
            for i in [mean, logstd, n_samples]:
                self.assertTrue(i.op in sample_ops)
            log_p = st.log_prob(np.ones([2, 3]))
            log_p_ops = set(get_backward_ops(log_p))
            for i in [mean, logstd, group_ndims]:
                self.assertTrue(i.op in log_p_ops)
            self.assertEqual(a.get_shape()[1:], mean.get_shape())
Project: zhusuan    Author: thu-ml    | project source | file source
def test_Binomial(self):
        with BayesianNet():
            logits = tf.zeros([2, 3])
            n_experiments = tf.placeholder(tf.int32, shape=[])
            n_samples = tf.placeholder(tf.int32, shape=[])
            group_ndims = tf.placeholder(tf.int32, shape=[])
            a = Binomial('a', logits, n_experiments, n_samples,
                         group_ndims)
        sample_ops = set(get_backward_ops(a.tensor))
        for i in [logits, n_experiments, n_samples]:
            self.assertTrue(i.op in sample_ops)
        log_p = a.log_prob(np.ones([2, 3], dtype=np.int32))
        log_p_ops = set(get_backward_ops(log_p))
        for i in [logits, n_experiments, group_ndims]:
            self.assertTrue(i.op in log_p_ops)
        self.assertEqual(a.get_shape()[1:], logits.get_shape())
Project: zhusuan    Author: thu-ml    | project source | file source
def test_init(self):
        with self.test_session(use_gpu=True):
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                Normal(mean=tf.ones([2, 1]))
            with self.assertRaisesRegexp(
                    ValueError, "Either.*should be passed but not both"):
                Normal(mean=tf.ones([2, 1]), std=1., logstd=0.)
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                Normal(mean=tf.ones([2, 1]), logstd=tf.zeros([2, 4, 3]))
            with self.assertRaisesRegexp(ValueError,
                                         "should be broadcastable to match"):
                Normal(mean=tf.ones([2, 1]), std=tf.ones([2, 4, 3]))

        Normal(mean=tf.placeholder(tf.float32, [None, 1]),
               logstd=tf.placeholder(tf.float32, [None, 1, 3]))
        Normal(mean=tf.placeholder(tf.float32, [None, 1]),
               std=tf.placeholder(tf.float32, [None, 1, 3]))
Project: deep-summarization    Author: harpribot    | project source | file source
def _load_model(self):
        """
        Creates the encoder decoder model

        :return: None
        """
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            cell = self.get_cell()
            # Stacks layers of RNN's to form a stacked decoder
            self.cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.num_layers)

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
Project: deep-summarization    Author: harpribot    | project source | file source
def _load_model(self):
        """
        Creates the encoder decoder model

        :return: None
        """
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            self.cell = self.get_cell()

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def get_init_state(self, batch_size):
        '''
        Construct the initial state of the grammar state machine.

        Returns:
            A tensor of dtype tf.int32 with shape (batch_size,)
        '''
        return tf.zeros((batch_size,), dtype=tf.int32)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def initialize(self):
        """Initialize the decoder.
        Args:
          name: Name scope for any created operations.
        Returns:
          `(finished, start_inputs, initial_state)`.
        """
        start_inputs = self._embedding_fn(self._tiled_start_tokens)
        print('start_inputs', start_inputs)
        finished = tf.zeros((self.batch_size, self._beam_width), dtype=tf.bool)

        self._initial_num_available_beams = tf.ones((self._batch_size,), dtype=tf.int32)
        self._full_num_available_beams = tf.fill((self._batch_size,), self._beam_width)

        with tf.name_scope('first_beam_mask'):
            self._first_beam_mask = self._make_beam_mask(self._initial_num_available_beams)
        with tf.name_scope('full_beam_mask'):
            self._full_beam_mask = self._make_beam_mask(self._full_num_available_beams)
        with tf.name_scope('minus_inifinity_scores'):
            self._minus_inifinity_scores = tf.fill((self.batch_size, self._beam_width, self._output_size), -1e+8)

        self._batch_size_range = tf.range(self.batch_size)
        initial_state = BeamSearchOptimizationDecoderState(
            cell_state=self._tiled_initial_cell_state,
            previous_logits=tf.zeros([self.batch_size, self._beam_width, self._output_size], dtype=tf.float32),
            previous_score=tf.zeros([self.batch_size, self._beam_width], dtype=tf.float32),
            # During the first time step we only consider the initial beam
            num_available_beams=self._initial_num_available_beams,
            gold_beam_id=tf.zeros([self.batch_size], dtype=tf.int32),
            finished=finished)

        return (finished, start_inputs, initial_state)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def zero_state(self, batch_size, dtype=tf.float32):
        zeros = tf.zeros((batch_size, self._num_cells), dtype=dtype)
        return LSTMStateTuple(zeros, zeros)
Project: youtube-8m    Author: wangheda    | project source | file source
def create_model(self,
                     model_input,
                     vocab_size,
                     num_frames,
                     **unused_params):

        shape = model_input.get_shape().as_list()
        frames_sum = tf.reduce_sum(tf.abs(model_input),axis=2)
        frames_true = tf.ones(tf.shape(frames_sum))
        frames_false = tf.zeros(tf.shape(frames_sum))
        frames_bool = tf.reshape(tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false),[-1,shape[1],1])

        activation_1 = tf.reduce_max(model_input, axis=1)
        activation_2 = tf.reduce_sum(model_input*frames_bool, axis=1)/(tf.reduce_sum(frames_bool, axis=1)+1e-6)
        activation_3 = tf.reduce_min(model_input, axis=1)

        model_input_1, final_probilities_1 = self.sub_moe(activation_1,vocab_size,scopename="_max")
        model_input_2, final_probilities_2 = self.sub_moe(activation_2,vocab_size,scopename="_mean")
        model_input_3, final_probilities_3 = self.sub_moe(activation_3,vocab_size,scopename="_min")
        final_probilities = tf.stack((final_probilities_1,final_probilities_2,final_probilities_3),axis=1)
        weight2d = tf.get_variable("ensemble_weight2d",
                                   shape=[shape[2], 3, vocab_size],
                                   regularizer=slim.l2_regularizer(1.0e-8))
        activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
        weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)
        result = {}
        result["prediction_frames"] = tf.reshape(final_probilities,[-1,vocab_size])
        result["predictions"] = tf.reduce_sum(final_probilities*weight,axis=1)
        return result
Project: youtube-8m    Author: wangheda    | project source | file source
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
      float_labels = tf.cast(labels, tf.float32)
      all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
      all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
      sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
      hinge_loss = tf.maximum(
          all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
      return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
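
The subtraction maps {0, 1} labels to {-1, +1}, so with b = 1.0 this is the standard per-class hinge max(0, 1 - (2y - 1) * p). A tiny worked check (hypothetical numbers):

# y = 1, p = 0.8  -> max(0, 1 - (+1) * 0.8)    = 0.2  (correct side, small loss)
# y = 0, p = 0.8  -> max(0, 1 - (-1) * 0.8)    = 1.8  (wrong side, large loss)
# y = 0, p = -0.5 -> max(0, 1 - (-1) * (-0.5)) = 0.5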
Project: youtube-8m    Author: wangheda    | project source | file source
def get_support(self, labels, support_type=None):
    if support_type is None:
      support_type = FLAGS.support_type
    if "," in support_type:
      new_labels = []
      for st in support_type.split(","):
        new_labels.append(tf.cast(self.get_support(labels, st), dtype=tf.float32))
      support_labels = tf.concat(new_labels, axis=1)
      return support_labels
    elif support_type == "vertical":
      num_classes = FLAGS.num_classes
      num_verticals = FLAGS.num_verticals
      vertical_file = FLAGS.vertical_file
      vertical_mapping = np.zeros([num_classes, num_verticals], dtype=np.float32)
      float_labels = tf.cast(labels, dtype=tf.float32)
      with open(vertical_file) as F:
        for line in F:
          group = list(map(int, line.strip().split()))
          if len(group) == 2:
            x, y = group
            vertical_mapping[x, y] = 1
      vm_init = tf.constant_initializer(vertical_mapping)
      vm = tf.get_variable("vm", shape = [num_classes, num_verticals], 
                           trainable=False, initializer=vm_init)
      vertical_labels = tf.matmul(float_labels, vm)
      return tf.cast(vertical_labels > 0.2, tf.float32)
    elif support_type == "frequent":
      num_frequents = FLAGS.num_frequents
      frequent_labels = tf.slice(labels, begin=[0, 0], size=[-1, num_frequents])
      frequent_labels = tf.cast(frequent_labels, dtype=tf.float32)
      return frequent_labels
    elif support_type == "label":
      float_labels = tf.cast(labels, dtype=tf.float32)
      return float_labels
    else:
      raise NotImplementedError()
Project: skiprnn-2017-telecombcn    Author: imatge-upc    | project source | file source
def compute_budget_loss(model, loss, updated_states, cost_per_sample):
    """
    Compute penalization term on the number of updated states (i.e. used samples)
    """
    if using_skip_rnn(model):
        return tf.reduce_mean(tf.reduce_sum(cost_per_sample * updated_states, 1), 0)
    else:
        return tf.zeros(loss.get_shape())
Project: seq2seq    Author: google    | project source | file source
def _create(self):
    zero_state = nest.map_structure(
        lambda x: tf.zeros([self.batch_size, x], dtype=tf.float32),
        self.decoder_state_size)
    return zero_state