Python tensorflow module: get_variable_scope() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.get_variable_scope().
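
As a quick orientation before the project samples: tf.get_variable_scope() returns the innermost VariableScope currently active, whose name prefixes every variable created inside it. A minimal TF 1.x graph-mode sketch (the scope and variable names here are illustrative, not taken from the projects below):

import tensorflow as tf  # TensorFlow 1.x, graph mode

with tf.variable_scope("model"):
    with tf.variable_scope("layer1"):
        scope = tf.get_variable_scope()  # the innermost active VariableScope
        print(scope.name)                # -> model/layer1
        w = tf.get_variable("w", shape=[3, 3])
        print(w.name)                    # -> model/layer1/w:0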

Project: human-rl    Author: gsastry    | Project source | File source
def __init__(self, ob_space, ac_space, layers=[256], **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        rank = len(ob_space)

        if rank == 3: # pixel input
            for i in range(4):
                x = tf.nn.elu(conv2d(x, 32, "c{}".format(i + 1), [3, 3], [2, 2]))
        elif rank == 1: # plain features
            #x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
            pass
        else:
            raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

        x = flatten(x)

        for i, layer in enumerate(layers):
            x = tf.nn.elu(linear(x, layer, "l{}".format(i + 1), tf.contrib.layers.xavier_initializer()))

        self.logits = linear(x, ac_space, "action", tf.contrib.layers.xavier_initializer())
        self.vf = tf.reshape(linear(x, 1, "value", tf.contrib.layers.xavier_initializer()), [-1])
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
        self.state_in = []
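
The var_list line above shows a recurring idiom: passing tf.get_variable_scope().name as the scope filter of tf.get_collection restricts the result to the trainable variables created under the scope the policy was built in. A minimal standalone sketch of the same idiom (the scope name "policy" is illustrative):

import tensorflow as tf

with tf.variable_scope("policy"):
    w = tf.get_variable("w", shape=[4, 2])
    b = tf.get_variable("b", shape=[2])
    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 tf.get_variable_scope().name)

print([v.name for v in var_list])  # -> ['policy/w:0', 'policy/b:0']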
Project: human-rl    Author: gsastry    | Project source | File source
def __init__(self, ob_space, ac_space, size=256, **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
        x = tf.expand_dims(flatten(x), 1)

        gru = rnn.GRUCell(size)

        h_init = np.zeros((1, size), np.float32)
        self.state_init = [h_init]
        h_in = tf.placeholder(tf.float32, [1, size])
        self.state_in = [h_in]

        gru_outputs, gru_state = tf.nn.dynamic_rnn(
            gru, x, initial_state=h_in, sequence_length=[size], time_major=True)
        x = tf.reshape(gru_outputs, [-1, size])
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [gru_state[:1]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
Project: distributional_perspective_on_RL    Author: Kiwoo    | Project source | File source
def __call__(self, *args):
        if args in self.cache:
            print("(%s) retrieving value from cache"%self.name)
            return self.cache[args]
        with tf.variable_scope(self.name, reuse=not self.first_time):
            scope = tf.get_variable_scope().name
            if self.first_time:
                self.scope = scope
                print("(%s) running function for the first time"%self.name)
            else:
                assert self.scope == scope, "Tried calling function with a different scope"
                print("(%s) running function on new inputs"%self.name)
            self.first_time = False
            out = self._call(*args)
        self.cache[args] = out
        return out
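
Here reuse=not self.first_time makes the first call create the variables and every later call share them, while the stored scope name guards against rebuilding under a different enclosing scope. A stripped-down sketch of the reuse flag (the function and names are illustrative):

import tensorflow as tf

def net(x, reuse):
    with tf.variable_scope("net", reuse=reuse):
        w = tf.get_variable("w", shape=[4, 8])
        return tf.matmul(x, w)

y1 = net(tf.ones([2, 4]), reuse=False)  # first call creates net/w
y2 = net(tf.ones([3, 4]), reuse=True)   # later calls share net/w
assert len(tf.trainable_variables()) == 1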
Project: seq2seq    Author: google    | Project source | File source
def _build(self, initial_state, helper):
    if not self.initial_state:
      self._setup(initial_state, helper)

    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    maximum_iterations = None
    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      maximum_iterations = self.params["max_decode_length"]

    outputs, final_state = dynamic_decode(
        decoder=self,
        output_time_major=True,
        impute_finished=False,
        maximum_iterations=maximum_iterations)
    return self.finalize(outputs, final_state)
Project: seq2seq    Author: google    | Project source | File source
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, state = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)
    return EncoderOutput(
        outputs=outputs,
        final_state=state,
        attention_values=outputs,
        attention_values_length=sequence_length)
Project: seq2seq    Author: google    | Project source | File source
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=cell_fw,
        cell_bw=cell_bw,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)

    # Concatenate outputs and states of the forward and backward RNNs
    outputs_concat = tf.concat(outputs, 2)

    return EncoderOutput(
        outputs=outputs_concat,
        final_state=states,
        attention_values=outputs_concat,
        attention_values_length=sequence_length)
Project: dcan-tensorflow    Author: lisjin    | Project source | File source
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.
    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None and not tf.get_variable_scope().reuse:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
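
The decay terms registered in the 'losses' collection above are meant to be summed into the total objective elsewhere, as in the CIFAR-10 tutorial this helper follows. A minimal consumer-side sketch (toy tensors; names illustrative):

import tensorflow as tf

logits = tf.zeros([8, 10])
labels = tf.zeros([8], dtype=tf.int64)
cross_entropy_mean = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
tf.add_to_collection('losses', cross_entropy_mean)  # data term joins the decay terms
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')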
Project: DeepWorks    Author: daigo0927    | Project source | File source
def __call__(self, inputs, reuse = True):
        with tf.variable_scope(self.name) as vs:
            tf.get_variable_scope()  # no-op: fetches the current scope (same as vs) but discards it
            if reuse:
                vs.reuse_variables()

            x1, down1 = down_block(self.block_fn, 64)(inputs)
            x2, down2 = down_block(self.block_fn, 128)(down1)
            x3, down3 = down_block(self.block_fn, 256)(down2)

            down3 = self.block_fn(512)(down3)

            up3 = up_block(self.block_fn, 256)(x3, down3)
            up2 = up_block(self.block_fn, 128)(x2, up3)
            up1 = up_block(self.block_fn, 64)(x1, up2)

            outputs = tcl.conv2d(up1,
                                 num_outputs = self.output_ch,
                                 kernel_size = (1, 1),
                                 stride = (1, 1),
                                 padding = 'SAME')

            return outputs
Project: tfplus    Author: renmengye    | Project source | File source
def build(self, inp):
        # Divide input equally.
        self.lazy_init_var()
        inp_list = []
        output = []
        for ii in range(self.num_replica):
            with tf.name_scope('%s_%d' % ('replica', ii)) as scope:
                device = '/gpu:{}'.format(ii)
                with tf.device(device):
                    tf.get_variable_scope().reuse_variables()
                    inp_ = {
                        'x': inp['x_{}'.format(ii)],
                        'y_gt': inp['y_gt_{}'.format(ii)],
                        'phase_train': inp['phase_train']
                    }
                    output.append(self.sub_models[ii].build(inp_))
                    inp_list.append(inp_)
        self.output_list = output
        self.input_list = inp_list
        output = tf.concat([oo['y_out'] for oo in output], 0)
        self.register_var('y_out', output)
        output2 = tf.concat([mm.get_var('score_out')
                             for mm in self.sub_models], 0)
        self.register_var('score_out', output2)
        return {'y_out': output}
Project: neural-fonts    Author: periannath    | Project source | File source
def encoder(self, images, is_training, reuse=False):
        with tf.variable_scope("generator"):
            if reuse:
                tf.get_variable_scope().reuse_variables()

            encode_layers = dict()

            def encode_layer(x, output_filters, layer):
                act = lrelu(x)
                conv = conv2d(act, output_filters=output_filters, scope="g_e%d_conv" % layer)
                enc = batch_norm(conv, is_training, scope="g_e%d_bn" % layer)
                encode_layers["e%d" % layer] = enc
                return enc

            e1 = conv2d(images, self.generator_dim, scope="g_e1_conv")
            encode_layers["e1"] = e1
            e2 = encode_layer(e1, self.generator_dim * 2, 2)
            e3 = encode_layer(e2, self.generator_dim * 4, 3)
            e4 = encode_layer(e3, self.generator_dim * 8, 4)
            e5 = encode_layer(e4, self.generator_dim * 8, 5)
            e6 = encode_layer(e5, self.generator_dim * 8, 6)
            e7 = encode_layer(e6, self.generator_dim * 8, 7)
            e8 = encode_layer(e7, self.generator_dim * 8, 8)

            return e8, encode_layers
Project: neural-fonts    Author: periannath    | Project source | File source
def discriminator(self, image, is_training, reuse=False):
        with tf.variable_scope("discriminator"):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            h0 = lrelu(conv2d(image, self.discriminator_dim, scope="d_h0_conv"))
            h1 = lrelu(batch_norm(conv2d(h0, self.discriminator_dim * 2, scope="d_h1_conv"),
                                  is_training, scope="d_bn_1"))
            h2 = lrelu(batch_norm(conv2d(h1, self.discriminator_dim * 4, scope="d_h2_conv"),
                                  is_training, scope="d_bn_2"))
            h3 = lrelu(batch_norm(conv2d(h2, self.discriminator_dim * 8, sh=1, sw=1, scope="d_h3_conv"),
                                  is_training, scope="d_bn_3"))
            # real or fake binary loss
            fc1 = fc(tf.reshape(h3, [self.batch_size, -1]), 1, scope="d_fc1")
            # category loss
            fc2 = fc(tf.reshape(h3, [self.batch_size, -1]), self.embedding_num, scope="d_fc2")

            return tf.nn.sigmoid(fc1), fc1, fc2
Project: photinia    Author: XoriieInpottn    | Project source | File source
def build(self):
        """Build the widget.
        The main purpose of this function is to create the trainable variables (parameters) for the widget.

        :return: None.
        """
        if self._built:
            return self
        else:
            if self._name is None:
                #
                # Build WITHOUT scope.
                self._build()
                self._built = True
                return self
            else:
                #
                # Build WITH scope.
                self._scope = tf.get_variable_scope().name
                with tf.variable_scope(self._name):
                    self._build()
                    self._built = True
                    return self
Project: paraphrase-id-tensorflow    Author: nelson-liu    | Project source | File source
def __call__(self, inputs, state, scope=None):
        # Get the dropped-out outputs and state
        outputs_do, new_state_do = super(SwitchableDropoutWrapper,
                                         self).__call__(
                                             inputs, state, scope=scope)
        tf.get_variable_scope().reuse_variables()
        # Get the un-dropped-out outputs and state
        outputs, new_state = self._cell(inputs, state, scope)

        # Set the outputs and state to be the dropped out version if we are
        # training, and no dropout if we are not training.
        outputs = tf.cond(self.is_train, lambda: outputs_do,
                          lambda: outputs * (self._output_keep_prob))
        if isinstance(state, tuple):
            new_state = state.__class__(
                *[tf.cond(self.is_train, lambda: new_state_do_i,
                          lambda: new_state_i)
                  for new_state_do_i, new_state_i in
                  zip(new_state_do, new_state)])
        else:
            new_state = tf.cond(self.is_train, lambda: new_state_do,
                                lambda: new_state)
        return outputs, new_state
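
The wrapper runs the underlying cell twice under a reused scope and lets tf.cond select between the dropped-out and deterministic results at runtime. A stripped-down sketch of that switching idiom (the branches here are illustrative):

import tensorflow as tf

is_train = tf.placeholder(tf.bool, [])
x = tf.ones([2, 4])
out = tf.cond(is_train,
              lambda: tf.nn.dropout(x, keep_prob=0.5),  # stochastic training branch
              lambda: x)                                # deterministic eval branch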
Project: basic-encoder-decoder    Author: pemywei    | Project source | File source
def decode(self, cell, init_state, loop_function=None):
        outputs = []
        prev = None
        state = init_state
        for i, inp in enumerate(self.decoder_inputs_emb):

            if loop_function is not None and prev is not None:
                with tf.variable_scope("loop_function", reuse=True):
                    inp = loop_function(prev, i)
            if i > 0:
                tf.get_variable_scope().reuse_variables()
            output, state = cell(inp, state)
            # print output.eval()
            outputs.append(output)
            if loop_function is not None:
                prev = output
        return outputs
Project: IntelAct-Vizdoom    Author: chendagui16    | Project source | File source
def __make_net(self, input_images, input_measure, input_actions, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        fc_val_params = copy.deepcopy(self.__fc_joint_params)
        fc_val_params[-1]['out_dims'] = self.__target_dim

        fc_adv_params = copy.deepcopy(self.__fc_joint_params)
        fc_adv_params[-1]['out_dims'] = len(self.__net_discrete_actions) * self.__target_dim

        if self.verbose:
            print('fc_val_params:', fc_val_params)
            print('fc_adv_params:', fc_adv_params)

        p_img_conv = ly.conv_encoder(input_images, self.__conv_params, 'p_img_conv', msra_coeff=0.9)
        p_img_fc = ly.fc_net(ly.flatten(p_img_conv), self.__fc_img_params, 'p_img_fc', msra_coeff=0.9)
        p_meas_fc = ly.fc_net(input_measure, self.__fc_measure_params, 'p_meas_fc', msra_coeff=0.9)
        p_val_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                             fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
        p_adv_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                             fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
        p_adv_fc_nomean = p_adv_fc - tf.reduce_mean(p_adv_fc, reduction_indices=1, keep_dims=True)

        self.__pred_all_nomean = tf.reshape(p_adv_fc_nomean, [-1, len(self.__net_discrete_actions), self.__target_dim])
        self.__pred_all = self.__pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.__target_dim])
        self.__pred_relevant = tf.boolean_mask(self.__pred_all, tf.cast(input_actions, tf.bool))
Project: isbi2017-part3    Author: learningtitans    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                            spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
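
The reuse trick in these tests is the classic train/eval weight-sharing pattern: after reuse_variables() is called on the root scope, a second network build picks up the variables created by the first instead of raising. A minimal standalone sketch (TF 1.x graph mode; names illustrative):

import tensorflow as tf

def dense(x):
    w = tf.get_variable("w", shape=[3, 3])
    return tf.matmul(x, w)

a = dense(tf.ones([1, 3]))            # creates "w"
tf.get_variable_scope().reuse_variables()
b = dense(tf.ones([5, 3]))            # reuses the same "w"
assert len(tf.trainable_variables()) == 1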
Project: isbi2017-part3    Author: learningtitans    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_16(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
                             spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: isbi2017-part3    Author: learningtitans    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_19(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
                             spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: cancer    Author: yancz1989    | Project source | File source
def build_lstm_inner(H, lstm_input):
  '''
  build lstm decoder
  '''
  lstm_cell = rnn_cell.BasicLSTMCell(H['lstm_size'], forget_bias=0.0, state_is_tuple=False)
  if H['num_lstm_layers'] > 1:
    lstm = rnn_cell.MultiRNNCell([lstm_cell] * H['num_lstm_layers'], state_is_tuple=False)
  else:
    lstm = lstm_cell

  batch_size = H['batch_size'] * H['grid_height'] * H['grid_width']
  state = tf.zeros([batch_size, lstm.state_size])

  outputs = []
  with tf.variable_scope('RNN', initializer=tf.random_uniform_initializer(-0.1, 0.1)):
    for time_step in range(H['rnn_len']):
      if time_step > 0: tf.get_variable_scope().reuse_variables()
      output, state = lstm(lstm_input, state)
      outputs.append(output)
  return outputs
Project: dwt    Author: min2209    | Project source | File source
def get_conv_filter(self, params):
        if params["name"]+"/weights" in self.modelDict:
            init = tf.constant_initializer(value=self.modelDict[params["name"]+"/weights"], dtype=tf.float32)
            var = tf.get_variable(name="weights", initializer=init, shape=params["shape"])
            print "loaded " + params["name"]+"/weights"
        else:
            if params["std"]:
                stddev = params["std"]
            else:
                fanIn = params["shape"][0]*params["shape"][1]*params["shape"][2]
                stddev = (2/float(fanIn))**0.5

            init = tf.truncated_normal(shape=params["shape"], stddev=stddev, seed=0)
            var = tf.get_variable(name="weights", initializer=init)
            print "generated " + params["name"] + "/weights"

        if not tf.get_variable_scope().reuse:
            weightDecay = tf.multiply(tf.nn.l2_loss(var), self._wd,
                                      name='weight_loss')
            tf.add_to_collection('losses', weightDecay)

        return var
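
The `if not tf.get_variable_scope().reuse` guard above keeps the decay term from being registered twice when the same filter is rebuilt under a reused scope, e.g. for a second tower sharing weights. A minimal sketch of why the guard matters (names illustrative):

import tensorflow as tf

def conv_weights(wd=1e-4):
    var = tf.get_variable("weights", shape=[3, 3, 3, 16])
    if not tf.get_variable_scope().reuse:  # only the creating pass registers decay
        tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(var), wd))
    return var

with tf.variable_scope("tower") as vs:
    conv_weights()                         # creates the variable, registers decay once
with tf.variable_scope(vs, reuse=True):
    conv_weights()                         # shares the variable, registers nothing
assert len(tf.get_collection('losses')) == 1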
Project: sonnet    Author: deepmind    | Project source | File source
def __call__(self, shape, dtype=None, partition_info=None):
    # Creating different RestoreV2 ops when a single one could
    # output several tensors seems inefficient, but that's actually
    # what tf.Saver.restore_op (via tf.BaseSaverBuilder) does too.
    if self._scope is None:
      scope_name = tf.get_variable_scope().name
    elif callable(self._scope):
      scope_name = self._scope(tf.get_variable_scope().name)
    else:
      scope_name = self._scope
    tensor_name = self._var_name
    if scope_name:
      tensor_name = '{}/{}'.format(scope_name, tensor_name)
    tensor = io_ops.restore_v2(
        self._filename,
        [tensor_name],
        [self._partition_spec(shape, partition_info)],
        [dtype])[0]
    tensor.set_shape(shape)
    return tensor


Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def _build(self, initial_state, helper):
    if not self.initial_state:
      self._setup(initial_state, helper)

    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    maximum_iterations = None
    if self.mode == tf.contrib.learn.ModeKeys.INFER:
      maximum_iterations = self.params["max_decode_length"]

    outputs, final_state = dynamic_decode(
        decoder=self,
        output_time_major=True,
        impute_finished=False,
        maximum_iterations=maximum_iterations)
    return self.finalize(outputs, final_state)
Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def linear_mapping_stupid(inputs, out_dim, in_dim=None, dropout=1.0, var_scope_name="linear_mapping"):
  with tf.variable_scope(var_scope_name):
    print('name', tf.get_variable_scope().name) 
    input_shape_tensor = tf.shape(inputs)   # dynamic shape, no None
    input_shape = inputs.get_shape().as_list()    # static shape. may has None
    print('input_shape', input_shape)
    assert len(input_shape) == 3
    inputs = tf.reshape(inputs, [-1, input_shape_tensor[-1]])

    linear_mapping_w = tf.get_variable("linear_mapping_w", [input_shape[-1], out_dim], initializer=tf.random_normal_initializer(mean=0, stddev=tf.sqrt(dropout*1.0/input_shape[-1])))
    linear_mapping_b = tf.get_variable("linear_mapping_b", [out_dim], initializer=tf.zeros_initializer())


    output = tf.matmul(inputs, linear_mapping_w) + linear_mapping_b
    print('xxxxx_params', input_shape, out_dim)
    #output = tf.reshape(output, [input_shape[0], -1, out_dim])
    output = tf.reshape(output, [input_shape_tensor[0], -1, out_dim])

  return output
Project: conv_seq2seq    Author: tobyyouup    | Project source | File source
def encode(self, inputs, sequence_length, **kwargs):
    scope = tf.get_variable_scope()
    scope.set_initializer(tf.random_uniform_initializer(
        -self.params["init_scale"],
        self.params["init_scale"]))

    cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
    outputs, state = tf.nn.dynamic_rnn(
        cell=cell,
        inputs=inputs,
        sequence_length=sequence_length,
        dtype=tf.float32,
        **kwargs)
    return EncoderOutput(
        outputs=outputs,
        final_state=state,
        attention_values=outputs,
        attention_values_length=sequence_length)
Project: DirectFuturePrediction    Author: IntelVCL    | Project source | File source
def make_net(self, input_images, input_measurements, input_actions, input_objectives, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        self.fc_joint_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
        p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
        p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
        p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
        if isinstance(self.fc_obj_params, np.ndarray):
            p_obj_fc = my_ops.fc_net(input_objectives, self.fc_obj_params, 'p_obj_fc', msra_coeff=0.9)
            p_concat_fc = tf.concat([p_img_fc,p_meas_fc,p_obj_fc], 1)
        else:
            p_concat_fc = tf.concat([p_img_fc,p_meas_fc], 1)
            if self.random_objective_coeffs:
                raise Exception('Need fc_obj_params with randomized objectives')

        p_joint_fc = my_ops.fc_net(p_concat_fc, self.fc_joint_params, 'p_joint_fc', last_linear=True, msra_coeff=0.9)
        pred_all = tf.reshape(p_joint_fc, [-1, len(self.net_discrete_actions), self.target_dim])
        pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))

        return pred_all, pred_relevant
Project: show-adapt-and-tell    Author: tsenghungchen    | Project source | File source
def domain_classifier(self, images, name="G", reuse=False):
    random_uniform_init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
    with tf.variable_scope(name):
        tf.get_variable_scope().reuse_variables()
        with tf.variable_scope("images"):
            # "generator/images"
            images_W = tf.get_variable("images_W", [self.img_dims, self.G_hidden_size], "float32", random_uniform_init)
        images_emb = tf.matmul(images, images_W)    # B,H

        l2_loss = tf.constant(0.0)
    with tf.variable_scope("domain"):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        with tf.variable_scope("output"):
            output_W = tf.get_variable("output_W", [self.G_hidden_size, self.num_domains],
                                       "float32", random_uniform_init)
            output_b = tf.get_variable("output_b", [self.num_domains], "float32", random_uniform_init)
        l2_loss += tf.nn.l2_loss(output_W)
        l2_loss += tf.nn.l2_loss(output_b)
        logits = tf.nn.xw_plus_b(images_emb, output_W, output_b, name="logits")
        predictions = tf.argmax(logits, 1, name="predictions")

        return predictions, logits, l2_loss
Project: relaax    Author: deeplearninc    | Project source | File source
def build_graph(self, x, batch_size=1, n_units=256):
        self.phs = [graph.Placeholder(np.float32, [batch_size, n_units]) for _ in range(2)]
        self.ph_state = graph.TfNode(tuple(ph.node for ph in self.phs))
        self.ph_state.checked = tuple(ph.checked for ph in self.phs)

        self.zero_state = tuple(np.zeros([batch_size, n_units]) for _ in range(2))

        state = tf.contrib.rnn.LSTMStateTuple(*self.ph_state.checked)

        lstm = tf.contrib.rnn.BasicLSTMCell(n_units, state_is_tuple=True)

        outputs, self.state = tf.nn.dynamic_rnn(lstm, x.node, initial_state=state,
                                                sequence_length=tf.shape(x.node)[1:2], time_major=False)

        self.state = graph.TfNode(self.state)
        self.weight = graph.TfNode(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                     tf.get_variable_scope().name))
        return outputs
Project: piecewisecrf    Author: Vaan5    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
      inception.inception_v3(train_inputs, num_classes)
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
      logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                         is_training=False)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.initialize_all_variables())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: terngrad    Author: wenwei202    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                            spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: terngrad    Author: wenwei202    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_16(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
                             spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: terngrad    Author: wenwei202    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_19(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
                             spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: terngrad    Author: wenwei202    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
                                     spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: terngrad    Author: wenwei202    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
                                    spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: terngrad    Author: wenwei202    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000
    with self.test_session() as sess:
      train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
      inception.inception_v3(train_inputs, num_classes)
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
      logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                         is_training=False)
      predictions = tf.argmax(logits, 1)
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Project: EKLAVYA    Author: shensq04    | Project source | File source
def probability(self):
        def lstm_cell():
            if 'reuse' in inspect.getargspec(tf.contrib.rnn.GRUCell.__init__).args:
                return tf.contrib.rnn.GRUCell(self.emb_dim, reuse=tf.get_variable_scope().reuse)
            else:
                return tf.contrib.rnn.GRUCell(self.emb_dim)

        attn_cell = lstm_cell
        if self.dropout < 1:
            def attn_cell():
                return tf.contrib.rnn.DropoutWrapper(
                    lstm_cell(), output_keep_prob=self._keep_prob)
        single_cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(self.num_layers)], state_is_tuple=True)

        output, state = tf.nn.dynamic_rnn(single_cell, self._data, dtype=tf.float32,
                                          sequence_length=self._length)
        weight = tf.Variable(tf.truncated_normal([self.emb_dim, self.num_classes], stddev=0.01))
        bias = tf.Variable(tf.constant(0.1, shape=[self.num_classes]))

        self.output = output
        probability = tf.matmul(self.last_relevant(output, self._length), weight) + bias
        return probability
Project: DeepRL    Author: arnomoonens    | Project source | File source
def __init__(self, state_shape, n_hidden, summary=True):
        super(CriticNetwork, self).__init__()
        self.state_shape = state_shape
        self.n_hidden = n_hidden

        with tf.variable_scope("critic"):
            self.states = tf.placeholder("float", [None] + self.state_shape, name="states")
            self.r = tf.placeholder(tf.float32, [None], name="r")

            L1 = tf.contrib.layers.fully_connected(
                inputs=self.states,
                num_outputs=self.n_hidden,
                activation_fn=tf.tanh,
                weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.02),
                biases_initializer=tf.zeros_initializer(),
                scope="L1")

            self.value = tf.reshape(linear(L1, 1, "value", normalized_columns_initializer(1.0)), [-1])

            self.loss = tf.reduce_sum(tf.square(self.value - self.r))
            self.summary_loss = self.loss
            self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
Project: DCGAN-TensorFlow    Author: JunhongXu    | Project source | File source
def conv2d(x, num_kernels, kernel_h=5, kernel_w=5, strides=2, padding="VALID", name="conv2d",
           use_bn=True, activation=tf.nn.relu, alpha=None, is_train=True, stddv=0.02):
    """
    Wrapper function for convolutional layer
    """
    n, h, w, c = x.get_shape().as_list()
    with tf.variable_scope(name):
        w = tf.get_variable(name="weight", initializer=tf.truncated_normal_initializer(stddev=stddv),
                            shape=(kernel_h, kernel_w, c, num_kernels))
        bias = tf.get_variable(name="bias", initializer=tf.constant_initializer(0.01), shape=num_kernels)
        y = tf.nn.conv2d(x, w, (1, strides, strides, 1), padding)
        y = tf.nn.bias_add(y, bias)

        if use_bn:
            y = batch_norm(y, tf.get_variable_scope().name, is_train)

        print("Convolutional 2D Layer %s, kernel size %s, output size %s Reuse:%s"
              % (tf.get_variable_scope().name, (kernel_h, kernel_w, c, num_kernels), y.get_shape().as_list(),
                 tf.get_variable_scope().reuse))
        if alpha is None:
            y = activation(y)
        else:
            y = activation(y, alpha)
    return y
Project: DCGAN-TensorFlow    Author: JunhongXu    | Project source | File source
def transpose_conv2d(x, output_shape, kernel_h=5, kernel_w=5, activation=tf.nn.relu, stride=2, padding="VALID",
                     use_bn=True, is_train=True, stddv=0.02, name="transpose_conv2d"):
    n, h, w, c = x.get_shape().as_list()
    num_kernels = output_shape[-1]
    with tf.variable_scope(name):
        w = tf.get_variable(name="weight", initializer=tf.truncated_normal_initializer(stddev=stddv),
                            shape=(kernel_h, kernel_w, num_kernels, c))
        bias = tf.get_variable(name="bias", initializer=tf.constant_initializer(0.01), shape=num_kernels)
        y = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, padding=padding,
                                   strides=(1, stride, stride, 1))
        y = tf.nn.bias_add(y, bias)
        if use_bn:
            y = batch_norm(y, tf.get_variable_scope().name, is_train)

        print("Transposed Convolutional 2D Layer %s, kernel size %s, output size %s Reuse:%s"
              % (tf.get_variable_scope().name, (kernel_h, kernel_w, c, num_kernels), y.get_shape().as_list(),
                 tf.get_variable_scope().reuse))
    return activation(y)
Project: DCGAN-TensorFlow    Author: JunhongXu    | Project source | File source
def dense_layer(x, num_neurons, name, activation, use_bn=False, is_train=True, stddv=0.02):
    if len(x.get_shape().as_list()) > 2:
        n, h, w, c = x.get_shape().as_list()
        d = h * w * c
    else:
        n, d = x.get_shape().as_list()
    with tf.variable_scope(name):
        # flatten x
        x = tf.reshape(x, (-1, d))
        w = tf.get_variable("weight", shape=(d, num_neurons), initializer=tf.random_normal_initializer(stddev=stddv))
        b = tf.get_variable("bias", shape=num_neurons, initializer=tf.constant_initializer(0.01))
        y = tf.matmul(x, w) + b
        if use_bn:
            y = batch_norm(y, name=tf.get_variable_scope().name, is_train=is_train)
        print("Dense Layer %s, output size %s" % (tf.get_variable_scope().name, y.get_shape().as_list()))
    return activation(y)
Project: DCGAN-TensorFlow    Author: JunhongXu    | Project source | File source
def discriminator(self, inpt, reuse, is_train):
        """
        Build D for training or testing. If reuse is True, the input should be the output of the generator.
        """
        with tf.variable_scope("discriminator"):
            if reuse:
                tf.get_variable_scope().reuse_variables()
            net = conv2d(x=inpt, num_kernels=self.d_init, name="conv1", activation=lkrelu, padding="SAME",
                         alpha=0.02, is_train=is_train, stddv=self.stddv)
            net = conv2d(x=net, num_kernels=self.d_init*2, name="conv2", activation=lkrelu, padding="SAME",
                         alpha=0.02, is_train=is_train, stddv=self.stddv)
            net = conv2d(x=net, num_kernels=self.d_init*4, name="conv3", activation=lkrelu, padding="SAME",
                         alpha=0.02, is_train=is_train, stddv=self.stddv)
            net = conv2d(x=net, num_kernels=self.d_init*8, name="conv4", activation=lkrelu, padding="SAME",
                         alpha=0.02, is_train=is_train, stddv=self.stddv)
            net = dense_layer(x=net, num_neurons=1, name="output", activation=tf.identity, is_train=is_train,
                              stddv=self.stddv)
        return net
Project: tensorflow_yolo2    Author: wenxichen    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_a(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
                            spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: tensorflow_yolo2    Author: wenxichen    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_16(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
                             spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: tensorflow_yolo2    Author: wenxichen    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 256, 256
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = vgg.vgg_19(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
                             spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: tensorflow_yolo2    Author: wenxichen    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 224, 224
    eval_height, eval_width = 300, 400
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = alexnet.alexnet_v2(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
                                     spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 4, 7, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: tensorflow_yolo2    Author: wenxichen    | Project source | File source
def testTrainEvalWithReuse(self):
    train_batch_size = 2
    eval_batch_size = 1
    train_height, train_width = 231, 231
    eval_height, eval_width = 281, 281
    num_classes = 1000
    with self.test_session():
      train_inputs = tf.random_uniform(
          (train_batch_size, train_height, train_width, 3))
      logits, _ = overfeat.overfeat(train_inputs)
      self.assertListEqual(logits.get_shape().as_list(),
                           [train_batch_size, num_classes])
      tf.get_variable_scope().reuse_variables()
      eval_inputs = tf.random_uniform(
          (eval_batch_size, eval_height, eval_width, 3))
      logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
                                    spatial_squeeze=False)
      self.assertListEqual(logits.get_shape().as_list(),
                           [eval_batch_size, 2, 2, num_classes])
      logits = tf.reduce_mean(logits, [1, 2])
      predictions = tf.argmax(logits, 1)
      self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Project: skiprnn-2017-telecombcn    Author: imatge-upc    | Project source | File source
def layer_norm(x, axes=1, initial_bias_value=0.0, epsilon=1e-3, name="var"):
    """
    Apply layer normalization to x
    Args:
        x: input variable.
        axes: axis or list of axes to normalize over.
        initial_bias_value: initial value for the LN bias.
        epsilon: small constant value to avoid division by zero.
        name: variable scope name for the LN parameters.
    Returns:
        LN(x) with same shape as x
    """
    if not isinstance(axes, list):
        axes = [axes]

    scope = tf.get_variable_scope()
    with tf.variable_scope(scope):
        with tf.variable_scope(name):
            mean = tf.reduce_mean(x, axes, keep_dims=True)
            variance = tf.sqrt(tf.reduce_mean(tf.square(x - mean), axes, keep_dims=True))

            with tf.device('/cpu:0'):
                gain = tf.get_variable('gain', x.get_shape().as_list()[1:],
                                       initializer=tf.constant_initializer(1.0))
                bias = tf.get_variable('bias', x.get_shape().as_list()[1:],
                                       initializer=tf.constant_initializer(initial_bias_value))

            return gain * (x - mean) / (variance + epsilon) + bias
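
A short usage sketch for the helper above (assuming the layer_norm function as defined is in scope; shapes illustrative): it normalizes each sample over the given axes and applies a learned per-feature gain and bias created under the current scope.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 128])
y = layer_norm(x, axes=1, name="ln")  # normalize over the feature axis
# y has the same shape as x; 'gain' and 'bias' live under <current scope>/ln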