Python tensorflow.python.ops.variable_scope module: get_variable() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.variable_scope.get_variable().

Project: AssociativeRetrieval (author: jxwufan)
def _fwlinear(self, args, output_size, scope=None):
    if args is None or (nest.is_sequence(args) and not args):
      raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
      args = [args]
    assert len(args) == 2
    assert args[0].get_shape().as_list()[1] == output_size

    dtype = [a.dtype for a in args][0]

    with vs.variable_scope(scope or "Linear"):
      matrixW = vs.get_variable(
        "MatrixW", dtype=dtype, initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))

      matrixC = vs.get_variable(
        "MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)

      res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
      return res
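A quick reminder of why get_variable() is used here rather than tf.Variable: it is scope-aware, so a second call under the same scope with reuse enabled returns the existing weights instead of creating new ones. A minimal TF1-style sketch (names are hypothetical):

import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs

with vs.variable_scope("Linear"):
    w1 = vs.get_variable("MatrixW", shape=[4, 4])   # created on first use
with vs.variable_scope("Linear", reuse=True):
    w2 = vs.get_variable("MatrixW")                 # fetched, not re-created
assert w1 is w2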
Project: Multi-channel-speech-extraction-using-DNN (author: zhr1201)
def ln(tensor, scope=None, epsilon=1e-5):
    """ Layer normalizes a 2D tensor along its second axis """
    assert(len(tensor.get_shape()) == 2)
    m, v = tf.nn.moments(tensor, [1], keep_dims=True)
    if not isinstance(scope, str):
        scope = ''
    with tf.variable_scope(scope + 'layer_norm'):
        scale = tf.get_variable('scale',
                                shape=[tensor.get_shape()[1]],
                                initializer=tf.constant_initializer(1))
        shift = tf.get_variable('shift',
                                shape=[tensor.get_shape()[1]],
                                initializer=tf.constant_initializer(0))
    LN_initial = (tensor - m) / tf.sqrt(v + epsilon)

    return LN_initial * scale + shift
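A usage sketch for ln() in TF1 graph mode (the placeholder shape and scope prefix are made up; assumes numpy imported as np):

x = tf.placeholder(tf.float32, [None, 129])    # [batch, features]
y = ln(x, scope='enc_')                        # variables live under 'enc_layer_norm'

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, {x: np.random.randn(8, 129).astype(np.float32)})
    # at initialization each row of out has roughly zero mean and unit variance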
Project: joint-slu-lm (author: HadoopIt)
def multilayer_perceptron(_X, input_size, n_hidden, n_class, forward_only=False):
    with variable_scope.variable_scope("DNN"):
      bias_start = 0.0
      weight_hidden = variable_scope.get_variable("Weight_Hidden", [input_size, n_hidden])         
      bias_hidden = variable_scope.get_variable("Bias_Hidden", [n_hidden],
                                                  initializer=init_ops.constant_initializer(bias_start))
      # Hidden layer with ReLU activation
      layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, weight_hidden), bias_hidden))

      if not forward_only:
          layer_1 = tf.nn.dropout(layer_1, 0.5)

      weight_out = variable_scope.get_variable("Weight_Out", [n_hidden, n_class])
      bias_out = variable_scope.get_variable("Bias_Out", [n_class],
                                                  initializer=init_ops.constant_initializer(bias_start))  
      output = tf.matmul(layer_1, weight_out) + bias_out
      #regularizers = tf.nn.l2_loss(weight_hidden) + tf.nn.l2_loss(bias_hidden) + tf.nn.l2_loss(weight_out) + tf.nn.l2_loss(bias_out) 
    return output
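A hedged usage sketch (sizes hypothetical). The function opens a fixed "DNN" scope, so building a second, dropout-free copy for evaluation requires variable reuse:

x = tf.placeholder(tf.float32, [None, 300])
logits = multilayer_perceptron(x, input_size=300, n_hidden=128, n_class=10)

with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=True):
    eval_logits = multilayer_perceptron(x, 300, 128, 10, forward_only=True)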
Project: lsdc (author: febert)
def categorical_variable(tensor_in, n_classes, embedding_size, name):
  """Creates an embedding for categorical variable with given number of classes.

  Args:
    tensor_in: Input tensor with class identifier (can be batch or
      N-dimensional).
    n_classes: Number of classes.
    embedding_size: Size of embedding vector to represent each class.
    name: Name of this categorical variable.
  Returns:
    Tensor of input shape, with additional dimension for embedding.

  Example:
    Calling categorical_variable([1, 2], 5, 10, "my_cat"), will return 2 x 10
    tensor, where each row is representation of the class.
  """
  with vs.variable_scope(name):
    embeddings = vs.get_variable(name + "_embeddings",
                                 [n_classes, embedding_size])
    return embedding_lookup(embeddings, tensor_in)
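For example (a sketch of the docstring case, assuming a standard tf import):

ids = tf.constant([1, 2])                                            # class ids
emb = categorical_variable(ids, n_classes=5, embedding_size=10, name="my_cat")
# emb has shape [2, 10]: one 10-dimensional row per input id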
Project: lsdc (author: febert)
def to_weighted_sum(self,
                      input_tensor,
                      num_outputs=1,
                      weight_collections=None,
                      trainable=True):

    def _weight(name):
      return variable_scope.get_variable(
          name,
          shape=[self.dimension, num_outputs],
          initializer=init_ops.zeros_initializer,
          collections=_add_variable_collection(weight_collections))

    if self.name:
      weight = _weight("weight")
    else:
      # Old behavior to support a subset of old checkpoints.
      weight = _weight("_weight")

    # The _RealValuedColumn has the shape of [batch_size, column.dimension].
    log_odds_by_dim = math_ops.matmul(input_tensor, weight, name="matmul")
    return log_odds_by_dim, [weight]
Project: lsdc (author: febert)
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
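For instance, sharding a [10, 4] variable across 3 shards gives row counts 4, 3 and 3: unit_shard_size is floor(10 / 3) = 3 and the single leftover row goes to the first shard. A sketch:

shards = _get_sharded_variable("kernel", [10, 4], tf.float32, num_shards=3)
# shards[0]: [4, 4], shards[1]: [3, 4], shards[2]: [3, 4]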
Project: lsdc (author: febert)
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("Attention"):
      k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("AttnV", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states
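A shape walkthrough of the block above (B = batch, L = attn_length, S = attn_size, V = attn_vec_size):

# attn_states:      [B, L, S]
# hidden:           [B, L, 1, S]    reshaped so a 1x1 conv can project S -> V
# hidden_features:  [B, L, 1, V]    conv2d with AttnW
# y:                [B, 1, 1, V]    projected query, broadcast over L
# s:                [B, L]          scores: reduce_sum of v * tanh(...) over axes 2, 3
# a:                [B, L]          softmax attention weights
# new_attns:        [B, S]          attention-weighted sum of the states
# new_attn_states:  [B, L-1, S]     oldest attention state sliced off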
Project: GORU-tensorflow (author: jingli9111)
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or "eunn_cell"):

            state = _eunn_loop(state, self._capacity, self.diag_vec, self.off_vec, self.diag, self._fft)

            input_matrix_init = init_ops.random_uniform_initializer(-0.01, 0.01)
            if self._comp:
                input_matrix_re = vs.get_variable("U_re", [inputs.get_shape()[-1], self._hidden_size], initializer=input_matrix_init)
                input_matrix_im = vs.get_variable("U_im", [inputs.get_shape()[-1], self._hidden_size], initializer=input_matrix_init)
                inputs_re = math_ops.matmul(inputs, input_matrix_re)
                inputs_im = math_ops.matmul(inputs, input_matrix_im)
                inputs = math_ops.complex(inputs_re, inputs_im)
            else:
                input_matrix = vs.get_variable("U", [inputs.get_shape()[-1], self._hidden_size], initializer=input_matrix_init)
                inputs = math_ops.matmul(inputs, input_matrix)

            bias = vs.get_variable("modReLUBias", [self._hidden_size], initializer=init_ops.constant_initializer())
            output = self._activation((inputs + state), bias, self._comp)

        return output, output
Project: ChineseNER (author: zjy-ucas)
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
Project: LSTM-CRF-For-Named-Entity-Recognition (author: zpppy)
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
Project: DL-Benchmarks (author: DL-Benchmarks)
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size, shape[1]],
                                  dtype=dtype))
  return shards
Project: DL-Benchmarks (author: DL-Benchmarks)
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._embedding:
          embedding = self._embedding
        else:
          if self._initializer:
            initializer = self._initializer
          elif vs.get_variable_scope().initializer:
            initializer = vs.get_variable_scope().initializer
          else:
            # Default initializer for embeddings should have variance=1.
            sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
          embedding = vs.get_variable("embedding", [self._embedding_classes,
                                                    self._cell.input_size],
                                      initializer=initializer)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
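The sqrt(3) bound follows from Var(U(-a, a)) = a^2 / 3, so a = sqrt(3) yields unit variance. A quick numerical check (plain numpy, independent of the code above):

import math
import numpy as np

a = math.sqrt(3)
samples = np.random.uniform(-a, a, size=1000000)
print(samples.var())   # ~1.0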
Project: dizzy_layer (author: Pastromhaug)
def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
        state_out = linearTransformIdentityInit(state, self._num_units)

        if self._bottom:
            input_out = linearTransformWithBias([inputs], self._num_units, bias=False, scope=scope)
        else:
            input_out = linearTransformIdentityInit(inputs, self._num_units, scope=scope)
        bias = vs.get_variable(
            "input_bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))

    output = tf.abs(state_out + input_out + bias)
    return output, output
Project: dizzy_layer (author: Pastromhaug)
def __call__(self, inputs, state, scope=None):

        with vs.variable_scope(scope or type(self).__name__):

            t_state = tf.transpose(state)

            state_out = doRotations(t_state, self._rotations)
            input_out = linearTransformWithBias([inputs],
                self._num_units, bias=False, scope=scope)

            state_out = tf.transpose(state_out)

            bias = vs.get_variable(
                "Bias", [self._num_units],
                dtype=tf.float32,
                initializer=init_ops.constant_initializer(dtype=tf.float32))

            output = tf.nn.relu(state_out + input_out + bias)
        return output, output
Project: dizzy_layer (author: Pastromhaug)
def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
        state_out = linearTransformIdentityInit(state, self._num_units)

        if self._bottom:
            input_out = linearTransformWithBias([inputs], self._num_units, bias=False, scope=scope)
        else:
            input_out = linearTransformIdentityInit(inputs, self._num_units, scope=scope)
        bias = vs.get_variable(
            "input_bias", [self._num_units],
            dtype=tf.float32,
            initializer=init_ops.constant_initializer(dtype=tf.float32))

    output = tf.nn.relu(state_out + input_out + bias)
    return output, output
Project: dizzy_layer (author: Pastromhaug)
def __call__(self, inputs, state, scope=None):

        with vs.variable_scope(scope or type(self).__name__):

            t_state = tf.transpose(state)

            state_out = doRotations(t_state, self._rotations)
            input_out = linearTransformWithBias([inputs],
                self._num_units, bias=False, scope=scope)

            state_out = tf.transpose(state_out)

            gate = linearTransformWithBias([inputs, state], self._num_units, True, scope='GateLinearTransfrom')
            gate = tf.nn.sigmoid(gate, name='GateSigmoid')

            bias = vs.get_variable(
                "Bias", [self._num_units],
                dtype=tf.float32,
                initializer=init_ops.constant_initializer(dtype=tf.float32))

            input_gate = 1.0 - gate  # complement of the gate, so the two terms form a convex blend
            output = state * gate + input_gate * tf.abs(state_out + input_out + bias)
        return output, output
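For reference, the standard convex-combination gating this cell follows, sketched in isolation (gate_logits, state and candidate are hypothetical tensors):

g = tf.nn.sigmoid(gate_logits)                  # g in (0, 1)
output = g * state + (1.0 - g) * candidate      # blend of old state and new candidate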
Project: dizzy_layer (author: Pastromhaug)
def __call__(self, inputs, state, scope=None):

        with vs.variable_scope(scope or type(self).__name__):

            t_state = tf.transpose(state)

            state_out = doRotations(t_state, self._rotations)
            input_out = linearTransformWithBias([inputs],
                self._num_units, bias=False, scope=scope)

            state_out = tf.transpose(state_out)

            bias = vs.get_variable(
                "Bias", [self._num_units],
                dtype=tf.float32,
                initializer=init_ops.constant_initializer(dtype=tf.float32))

            output = tf.abs(state_out + input_out + bias)
        return output, output
Project: dizzy_layer (author: Pastromhaug)
def __call__(self, inputs, state, scope=None):

        with vs.variable_scope(scope or type(self).__name__):

            t_state = tf.transpose(state)
            t_inputs = tf.transpose(inputs)
            if self._bottom:
                [state_out] = rotationTransform([("StateL", t_state)], self._num_units , scope, self._num_rots)
                input_out = linearTransformWithBias([inputs],
                    self._num_units, bias=False, scope=scope)
            else:
                [state_out, input_out] = \
                    rotationTransform([("StateL", t_state), ("InputL", t_inputs)],
                    self._num_units, scope)
                input_out = tf.transpose(input_out)
            state_out = tf.transpose(state_out)


            bias = vs.get_variable(
                "Bias", [self._num_units],
                dtype=tf.float32,
                initializer=init_ops.constant_initializer(dtype=tf.float32))

            output = tf.abs(state_out + input_out + bias)
        return output, output
Project: dizzy_layer (author: Pastromhaug)
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or type(self).__name__):

            state_rot = rotationTransform(tf.transpose(state), self._num_units, self._num_params,
                self._cos_list,  self._sin_list, self._nsin_list,
                self._cos_idxs, self._sin_idxs, self._nsin_idxs)

            state_scale, sigma = diagonalTransform(state_rot, self._num_units)
            self.sigma = sigma

            state_out = rotationTransform(state_scale, self._num_units, self._num_params,
                self._cos_list,  self._sin_list, self._nsin_list,
                self._cos_idxs, self._sin_idxs, self._nsin_idxs)
            state_out = tf.transpose(state_out)

            input_out = linearTransformWithBias([inputs], self._num_units, bias=False)

            bias = vs.get_variable(
                "Bias", [self._num_units],
                dtype=tf.float32,
                initializer=init_ops.constant_initializer(dtype=tf.float32))

            output = tf.abs(state_out + input_out + bias)
        return output, output
Project: seq2seq_parser (author: trangham283)
def embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols,
                          embedding_size, output_projection=None,
                          feed_previous=False,
                          update_embedding_for_previous=True, scope=None):
  """RNN decoder with embedding and a pure-decoding option.

  """
  if output_projection is not None:
    proj_weights = ops.convert_to_tensor(output_projection[0],
                                         dtype=dtypes.float32)
    proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
    proj_biases = ops.convert_to_tensor(
        output_projection[1], dtype=dtypes.float32)
    proj_biases.get_shape().assert_is_compatible_with([num_symbols])

  with variable_scope.variable_scope(scope or "embedding_rnn_decoder"):
    embedding = variable_scope.get_variable("embedding",
            [num_symbols, embedding_size])
    loop_function = _extract_argmax_and_embed(
        embedding, output_projection,
        update_embedding_for_previous) if feed_previous else None
    emb_inp = (
        embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs)
    return rnn_decoder(emb_inp, initial_state, cell,
                       loop_function=loop_function)
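When feed_previous=True, _extract_argmax_and_embed builds the loop function that feeds the greedy prediction at step t back in as the input at step t+1. Roughly (a sketch; the real helper also handles stopping gradients through the embedding):

def loop_function(prev, _):
    if output_projection is not None:
        prev = tf.matmul(prev, output_projection[0]) + output_projection[1]
    prev_symbol = tf.argmax(prev, 1)
    return embedding_ops.embedding_lookup(embedding, prev_symbol)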
Project: PLSTM (author: Enny1991)
def _get_sharded_variable(name, shape, dtype, num_shards):
    """Get a list of sharded variables with the given dtype."""
    if num_shards > shape[0]:
        raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                         (shape, num_shards))
    unit_shard_size = int(math.floor(shape[0] / num_shards))
    remaining_rows = shape[0] - unit_shard_size * num_shards

    shards = []
    for i in range(num_shards):
        current_size = unit_shard_size
        if i < remaining_rows:
            current_size += 1
        shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                      dtype=dtype))
    return shards
Project: meta-learning (author: ioanachelu)
def _fwlinear(self, args, output_size, scope=None):
    if args is None or (nest.is_sequence(args) and not args):
      raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
      args = [args]
    assert len(args) == 2
    assert args[0].get_shape().as_list()[1] == output_size

    dtype = [a.dtype for a in args][0]

    with vs.variable_scope(scope or "Linear"):
      # matrixW = vs.get_variable(
      #   "MatrixW", dtype=dtype, initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))
      matrixW = vs.get_variable("MatrixW", [output_size, output_size], dtype=dtype)
      matrixC = vs.get_variable(
        "MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)

      res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
      return res
Project: diversity_based_attention (author: PrekshaNema25)
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
Project: diversity_based_attention (author: PrekshaNema25)
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
Project: ROLO (author: Guanghan)
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards
Project: ROLO (author: Guanghan)
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
Project: Variational-Recurrent-Autoencoder-Tensorflow (author: Chung-I)
def latent_to_decoder(latent_vector,
                      embedding_size,
                      latent_dim,
                      num_layers,
                      activation=tf.nn.relu,
                      use_lstm=False,
                      dtype=None):

  concat_state_size = num_layers * embedding_size
  if use_lstm:
    concat_state_size *= 2
  with tf.variable_scope('latent_to_decoder'):
    w = tf.get_variable("w",[latent_dim, concat_state_size],
      dtype=dtype)
    b = tf.get_variable("b", [concat_state_size], dtype=dtype)
    decoder_initial_state = prelu(tf.matmul(latent_vector, w) + b)
  if num_layers > 1:
    decoder_initial_state = tuple(tf.split(1, num_layers, decoder_initial_state))
    if use_lstm:
      decoder_initial_state = [tuple(tf.split(1, 2, single_layer_state)) for single_layer_state in decoder_initial_state]
  elif use_lstm:
    decoder_initial_state = tuple(tf.split(1, 2, decoder_initial_state))

  return decoder_initial_state
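A worked size example for the reshaping above (numbers hypothetical): with num_layers=2, embedding_size=128 and use_lstm=True:

# concat_state_size = 2 * 128 * 2 = 512
# decoder_initial_state starts as [batch, 512]
# tf.split(1, 2, ...) -> two per-layer tensors of [batch, 256]
# each is split again into a (c, h) pair of [batch, 128] tensors for the LSTM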
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def _create_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  v1 = variable_scope.get_variable("var1", [1, 10])
  v2 = variable_scope.get_variable("var2", [10, 10])
  v3 = variable_scope.get_variable("var3", [100, 100])
  with variable_scope.variable_scope("useful_scope"):
    v4 = variable_scope.get_variable("var4", [9, 9])
  sess.run(variables.global_variables_initializer())
  v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value, v2_value, v3_value, v4_value
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def _create_partition_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  with variable_scope.variable_scope("scope"):
    v1 = variable_scope.get_variable(
        name="var1",
        shape=[100, 100],
        initializer=init_ops.truncated_normal_initializer(0.5),
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=5, axis=0, min_slice_size=8 << 10))
  sess.run(variables.global_variables_initializer())
  v1_value = sess.run(v1._get_variable_list())
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testInitFromRootCheckpoint(self):
    checkpoint_dir = self.get_temp_dir()
    with self.test_session() as session:
      v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)

    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          my1 = variable_scope.get_variable("var1", [1, 10])
          my2 = variable_scope.get_variable("var2", [10, 10])
          my3 = variable_scope.get_variable("var3", [100, 100])
          with variable_scope.variable_scope("useful_scope"):
            my4 = variable_scope.get_variable("var4", [9, 9])

        checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                              {"/": "some_scope/",})

        session.run(variables.global_variables_initializer())
        self.assertAllEqual(my1.eval(session), v1)
        self.assertAllEqual(my2.eval(session), v2)
        self.assertAllEqual(my3.eval(session), v3)
        self.assertAllEqual(my4.eval(session), v4)
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testStochasticVariables(self):
    shape = (10, 20)
    with variable_scope.variable_scope(
        "stochastic_variables",
        custom_getter=sv.make_stochastic_variable_getter(
            dist_cls=dist.NormalWithSoftplusScale)):
      v = variable_scope.get_variable("sv", shape)

    self.assertTrue(isinstance(v, st.StochasticTensor))
    self.assertTrue(isinstance(v.distribution, dist.NormalWithSoftplusScale))

    self.assertEqual(
        {"stochastic_variables/sv_loc", "stochastic_variables/sv_scale"},
        set([v.op.name for v in variables.global_variables()]))
    self.assertEqual(
        set(variables.trainable_variables()), set(variables.global_variables()))

    v = ops.convert_to_tensor(v)
    self.assertEqual(list(shape), v.get_shape().as_list())
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertEqual(shape, sess.run(v).shape)
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testStochasticVariablesWithConstantInitializer(self):
    shape = (10, 20)
    with variable_scope.variable_scope(
        "stochastic_variables",
        custom_getter=sv.make_stochastic_variable_getter(
            dist_cls=dist.NormalWithSoftplusScale,
            dist_kwargs={"validate_args": True},
            param_initializers={
                "loc": np.ones(shape) * 4.,
                "scale": np.ones(shape) * 2.
            })):
      v = variable_scope.get_variable("sv")

    for var in variables.global_variables():
      if "loc" in var.name:
        mu_var = var
      if "scale" in var.name:
        sigma_var = var

    v = ops.convert_to_tensor(v)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
      self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
      self.assertEqual(shape, sess.run(v).shape)
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testStochasticVariablesWithCallablePriorInitializer(self):

    def prior_init(shape, dtype):
      return dist.Normal(
          array_ops.zeros(shape, dtype), array_ops.ones(shape, dtype))

    with variable_scope.variable_scope(
        "stochastic_variables",
        custom_getter=sv.make_stochastic_variable_getter(
            dist_cls=dist.NormalWithSoftplusScale, prior=prior_init)):
      w = variable_scope.get_variable("weights", (10, 20))

    x = random_ops.random_uniform((8, 10))
    y = math_ops.matmul(x, w)

    prior_map = vi._find_variational_and_priors(y, None)
    self.assertTrue(isinstance(prior_map[w], dist.Normal))
    elbo = vi.elbo(y, keep_batch_dim=False)

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(elbo)
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testGradientWithZeroWeight(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)

      inputs = array_ops.ones((2, 3))
      weights = variable_scope.get_variable(
          'weights',
          shape=[3, 4],
          initializer=init_ops.truncated_normal_initializer())
      predictions = math_ops.matmul(inputs, weights)

      optimizer = momentum_lib.MomentumOptimizer(
          learning_rate=0.001, momentum=0.9)
      loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)

      gradients_to_variables = optimizer.compute_gradients(loss)

      init_op = variables.global_variables_initializer()

      with self.test_session() as sess:
        sess.run(init_op)
        for grad, _ in gradients_to_variables:
          np_grad = sess.run(grad)
          self.assertFalse(np.isnan(np_grad).any())
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def categorical_variable(tensor_in, n_classes, embedding_size, name):
  """Creates an embedding for categorical variable with given number of classes.

  Args:
    tensor_in: Input tensor with class identifier (can be batch or
      N-dimensional).
    n_classes: Number of classes.
    embedding_size: Size of embedding vector to represent each class.
    name: Name of this categorical variable.
  Returns:
    Tensor of input shape, with additional dimension for embedding.

  Example:
    Calling categorical_variable([1, 2], 5, 10, "my_cat"), will return 2 x 10
    tensor, where each row is representation of the class.
  """
  with vs.variable_scope(name):
    embeddings = vs.get_variable(name + '_embeddings',
                                 [n_classes, embedding_size])
    return embedding_lookup(embeddings, tensor_in)
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testNoGlobalStep(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
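The expected 9.5 is one SGD step worked out by hand: with loss = |var * x|, x = 5 and var = 10, the gradient with respect to var is 5:

var, lr, x = 10.0, 0.1, 5.0
grad = x                  # d|var * x| / dvar = x while var * x > 0
var -= lr * grad          # 10 - 0.1 * 5 = 9.5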
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testNoGlobalStepWithDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g):
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        with self.assertRaisesRegexp(
            ValueError, "global_step is required for learning_rate_decay_fn"):
          optimizers_lib.optimize_loss(
              loss,
              global_step=None,
              learning_rate=0.1,
              learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
              optimizer=optimizer,
              update_ops=[update_op])
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testUpdateOp(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(1, global_step.eval())
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testUpdateOpWithNoOpDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(1, global_step.eval())
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testUpdateOpFromCollection(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
        train = optimizers_lib.optimize_loss(
            loss, global_step, learning_rate=0.1, optimizer=optimizer)
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        var_value, update_var_value, global_step_value = session.run(
            [var, update_var, global_step])
        self.assertEqual(var_value, 9.5)
        self.assertEqual(update_var_value, 20)
        self.assertEqual(global_step_value, 1)
Project: DeepLearning_VirtualReality_BigData_Project (author: rashmitripathi)
def testJITVariableSeed(self):
    """Test that the stateful initializer is not marked for compilation.

    XLA does not currently support seeded initialization and XLA initializers
    therefore return different values than non-XLA counterparts.  Here
    we ensure that if we disable JIT compilation for the initializers, we
    get the same variable values as if no JIT compilation happened.
    """
    def create_ops():
      with variable_scope.variable_scope(
          "root",
          initializer=init_ops.random_uniform_initializer(
              -0.1, 0.1, seed=2)):
        inputs = variable_scope.get_variable("var", (1,))
        return inputs
    _, v_false_1 = self.compute(False, create_ops)
    _, v_false_2 = self.compute(False, create_ops)
    _, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
    _, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
    self.assertAllClose(v_false_1, v_false_2)
    self.assertAllClose(v_true_1, v_true_2)
    self.assertAllClose(v_false_1, v_true_1)
Project: RFR-solution (author: baoblackcoal)
def embedding_attention_decoder(initial_state,
                                attention_states,
                                cell,
                                num_symbols,
                                time_steps,
                                batch_size,
                                embedding_size,
                                output_size=None,
                                output_projection=None,
                                feed_previous=False,
                                update_embedding_for_previous=True,
                                dtype=None,
                                scope=None):
  if output_size is None:
    output_size = cell.output_size
  if output_projection is not None:
    proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
    proj_biases.get_shape().assert_is_compatible_with([num_symbols])

  with variable_scope.variable_scope(
      scope or "embedding_attention_decoder", dtype=dtype) as scope:

    embedding = variable_scope.get_variable("embedding",
                                            [num_symbols, embedding_size])
    loop_function = tf.nn.seq2seq._extract_argmax_and_embed(
        embedding, output_projection,
        update_embedding_for_previous) if feed_previous else None
    return attention_decoder(
        initial_state,
        attention_states,
        cell,
        num_symbols,
        time_steps,
        batch_size,
        output_size=output_size,
        loop_function=loop_function)
Project: mann-for-speech-separation (author: KWTsou1220)
def get_biases(name, shape, value, trainable=True):
    return tf.get_variable('biases{}'.format(name), shape,
                           initializer=tf.constant_initializer(value),
                           trainable=trainable)
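A usage sketch (layer name and width hypothetical):

b = get_biases('_fc1', shape=[256], value=0.1)
# creates a trainable variable named 'biases_fc1', initialized to 0.1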
Project: mann-for-speech-separation (author: KWTsou1220)
def _weight_variable(shape, name='weights',
                     initializer=tf.contrib.layers.xavier_initializer(
                         uniform=True, seed=None, dtype=tf.float32)):
    return tf.get_variable(shape=shape, initializer=initializer, name=name)
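Usage sketch (shape hypothetical). The Xavier initializer in the signature is built once, when the module is imported; initializers are stateless, so that is harmless here, but it is the mutable-default pattern to keep an eye on:

w = _weight_variable([3, 3, 64, 128], name='conv1_weights')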