Python tensorflow module: Variables() code examples

We extracted the following 16 code examples from open-source Python projects to illustrate how tensorflow.Variables() (i.e. tf.Variable objects) is used.
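As a quick reference before the project-specific examples, the minimal sketch below (TensorFlow 1.x style, with illustrative names and shapes) shows how a tf.Variable is typically created, initialized and updated inside a session.

import tensorflow as tf

# Create a trainable variable initialised with zeros.
w = tf.Variable(tf.zeros([3, 2]), name="w")

# Build an op that assigns new values to the variable.
update_w = w.assign(tf.ones([3, 2]))

with tf.Session() as sess:
    # Variables must be initialised before they can be read.
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))   # all zeros
    sess.run(update_w)
    print(sess.run(w))   # all ones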

Project: RFHO    Author: lucfra    | project source | file source
def var_list(self, mode=VlMode.RAW):
        """
        Get the chunks that define this variable.

        :param mode: (optional, default VlMode.RAW)
                     VlMode.RAW: simply returns var_list, which may contain tf.Variables or MergedVariables;
                     VlMode.BASE: returns the list of tf.Variables that are the "base" variables of this
                     MergedVariable;
                     VlMode.TENSOR: returns a list of tf.Variables or tf.Tensors from the MergedVariables.
        :return: A list that may contain tf.Tensors, tf.Variables and/or MergedVariables
        """
        if mode == VlMode.RAW:
            return self._var_list
        elif mode == VlMode.BASE:
            return self._get_base_variable_list()
        elif mode == VlMode.TENSOR:
            return self._var_list_as_tensors()  # returns the single (merged) tensor plus its augmented copies
        else:
            raise NotImplementedError('mode %s does not exist' % mode)
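The MergedVariable/VlMode machinery above is specific to RFHO, but the underlying idea of treating several tf.Variables as a single flat tensor can be sketched with plain TensorFlow. The helper below is purely illustrative and is not RFHO's implementation.

import tensorflow as tf

def merge_variables(var_list):
    """Flatten and concatenate a list of tf.Variables into one rank-1 tensor."""
    return tf.concat([tf.reshape(v, [-1]) for v in var_list], axis=0)

w = tf.Variable(tf.zeros([2, 3]))
b = tf.Variable(tf.zeros([3]))
merged = merge_variables([w, b])  # rank-1 tensor of shape (9,)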
Project: RNNVis    Author: myaooo    | project source | file source
def compile(self):
        """
        Compile the model. Should be called before training or running the model.
        Basically, this function just performs checks on the model configuration,
            and creates an Evaluator which contains an unrolled model.
        :return: None
        """
        if self.is_compiled:  # In case of multiple compiles
            print("Already compiled!")
            return
        if self.input_shape is None or self.input_dtype is None:
            raise ValueError("input_shape or input_dtype is None, call set_input first!")
        if self.output_shape is None or self.output_dtype is None:
            raise ValueError("output_shape or output_dtype is None, call set_output first!")
        if self.target_shape is None or self.target_dtype is None:
            raise ValueError("target_shape or target_dtype is None, call set_target first!")
        if self.loss_func is None:
            raise ValueError("loss_func is None, call set_loss_func first!")
        # This operation creates no tf.Variables, so no variable scope is needed
        self._cell = tf.nn.rnn_cell.MultiRNNCell(cells=self.cell_list)
        # All done
        self.is_compiled = True
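For context, cell_list is expected to hold RNN cells created earlier during compile(). A minimal, hypothetical sketch of building such a list and stacking it (TF 1.x API; the layer count and hidden size are assumptions) looks like this:

import tensorflow as tf

num_layers, hidden_size = 2, 128
cell_list = [tf.nn.rnn_cell.BasicLSTMCell(hidden_size) for _ in range(num_layers)]
# Stacking the cells creates no tf.Variables yet; the weights are only created
# when the cell is applied to inputs (e.g. inside tf.nn.dynamic_rnn).
stacked_cell = tf.nn.rnn_cell.MultiRNNCell(cells=cell_list)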
Project: RNNVis    Author: myaooo    | project source | file source
def map_to_embedding(self, inputs):
        """
        Map the input ids into embeddings.
        :param inputs: a 2D Tensor of shape (num_steps, batch_size) of type int32, denoting word ids
        :return: a 3D Tensor of shape (num_steps, batch_size, embedding_size) of type float32.
        """
        if self.has_embedding:
            # The Variables are already created in compile(); enter the same variable scope to share them
            with tf.variable_scope('embedding', initializer=self.initializer):
                with tf.device("/cpu:0"):  # Force CPU since GPU implementation is missing
                    embedding = tf.get_variable("embedding",
                                                [self.vocab_size+1, self.embedding_size],
                                                dtype=data_type())
                    return tf.nn.embedding_lookup(embedding, inputs)
        else:
            return None
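Outside of this class, the same embedding pattern can be reproduced with plain TensorFlow; in the sketch below the vocabulary and embedding sizes are placeholders chosen for illustration.

import tensorflow as tf

vocab_size, embedding_size = 10000, 128
word_ids = tf.placeholder(tf.int32, shape=(None, None))  # (num_steps, batch_size)

with tf.variable_scope("embedding"):
    embedding = tf.get_variable("embedding", [vocab_size, embedding_size],
                                dtype=tf.float32)
    # Select the rows of `embedding` indexed by `word_ids`; the result has
    # shape (num_steps, batch_size, embedding_size).
    embedded = tf.nn.embedding_lookup(embedding, word_ids)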
Project: CS224n    Author: akash9182    | project source | file source
def add_prediction_op(self):
        """Adds the core transformation for this model which transforms a batch of input
        data into a batch of predictions. In this case, the transformation is a linear layer plus a
        softmax transformation:

        y = softmax(xW + b)

        Hint: Make sure to create tf.Variables as needed.
        Hint: For this simple use-case, it's sufficient to initialize both weights W
                    and biases b with zeros.

        Args:
            input_data: A tensor of shape (batch_size, n_features).
        Returns:
            pred: A tensor of shape (batch_size, n_classes)
        """
        ### YOUR CODE HERE
        W = tf.Variable(tf.zeros((self.config.n_features, self.config.n_classes)))
        b = tf.Variable(tf.zeros((self.config.n_classes,)))
        pred = softmax(tf.matmul(self.input_placeholder, W) + b)

        ### END YOUR CODE
        return pred
Project: aboleth    Author: data61    | project source | file source
def norm_posterior(dim, std0):
    """Initialise a posterior (diagonal) Normal distribution.

    Parameters
    ----------
    dim : tuple or list
        the dimension of this distribution.
    std0 : float
        the initial (unoptimized) standard deviation of this distribution.

    Returns
    -------
    Q : tf.distributions.Normal
        the initialised posterior Normal object.

    Note
    ----
    This will create tf.Variables for the randomly initialised mean and standard
    deviation of the posterior. The mean is initialised from a Normal distribution
    with zero mean and ``std0`` standard deviation, and the standard deviation is
    initialised from a gamma distribution with an alpha of ``std0`` and a beta of 1.

    """
    mu_0 = tf.random_normal(dim, stddev=std0, seed=next(seedgen))
    mu = tf.Variable(mu_0, name="W_mu_q")

    std_0 = tf.random_gamma(alpha=std0, shape=dim, seed=next(seedgen))
    std = pos(tf.Variable(std_0, name="W_std_q"))

    Q = tf.distributions.Normal(loc=mu, scale=std)
    return Q
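A stripped-down version of the same idea, without aboleth's seedgen and pos() helpers, might look like the following; using softplus as the positivity transform is an assumption made for illustration, not necessarily what pos() does.

import tensorflow as tf

def simple_norm_posterior(dim, std0):
    """Illustrative diagonal Normal posterior backed by two tf.Variables."""
    mu = tf.Variable(tf.random_normal(dim, stddev=std0), name="W_mu_q")
    raw_std = tf.Variable(tf.random_gamma(shape=dim, alpha=std0), name="W_std_q")
    std = tf.nn.softplus(raw_std)  # keep the standard deviation positive
    return tf.distributions.Normal(loc=mu, scale=std)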
Project: sonnet    Author: deepmind    | project source | file source
def get_saver(scope, collections=(tf.GraphKeys.GLOBAL_VARIABLES,),  # pylint: disable=redefined-outer-name
              context=None, **kwargs):
  """Builds a `tf.train.Saver` for the scope or module, with normalized names.

  The names of the variables are normalized to remove the scope prefix.
  This allows the same variables to be restored into another similar scope or
  module using a complementary `tf.train.Saver` object.

  Args:
    scope: Scope or module. Variables within will be saved or restored.
    collections: Sequence of collections of variables to restrict
        `tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES`
        which includes moving-average variables as well as trainable variables.
    context: Scope or module, identical to or parent of `scope`. If given, this
        will be used as the stripped prefix.
    **kwargs: Extra keyword arguments to pass to tf.train.Saver.

  Returns:
    A `tf.train.Saver` object for Variables in the scope or module.
  """

  variable_map = {}
  for collection in collections:
    variable_map.update(get_normalized_variable_map(scope, collection, context))

  return tf.train.Saver(var_list=variable_map, **kwargs)
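The name normalization matters because tf.train.Saver stores each variable under its key in var_list. A small, generic illustration (plain TensorFlow, with made-up scope names) of saving a variable under its stripped name:

import tensorflow as tf

with tf.variable_scope("module"):
    with tf.variable_scope("linear"):
        w = tf.get_variable("w", shape=[4, 4])

# w.op.name is "module/linear/w"; saving it under the stripped key "linear/w"
# lets the checkpoint be restored into any other scope with the same mapping.
saver = tf.train.Saver(var_list={"linear/w": w})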
Project: Safe-RL-Benchmark    Author: befelix    | project source | file source
def parameters(self):
        """Return weights of the neural network.

        This returns the current values of a list of tf.Variables. Please note
        that these cannot simply be updated by assignment. See the
        parameters.setter docstring for more information.
        The list of tf.Variables itself can be accessed directly through the
        attribute `W`.
        """
        if self.sess is None:
            return tf.get_default_session().run(self.W_action + self.W_var)
        else:
            return self.sess.run(self.W_action + self.W_var)
Project: RFHO    Author: lucfra    | project source | file source
def __init__(self, _input, name, deterministic_initialization=False):
        """
        Creates an object that represents a network. Important attributes of a Network object are

        `var_list`: list of tf.Variables that constitute the parameters of the model

        `inp`: list, first element is `_input` and last should be output of the model. Other entries can be
        hidden layers activations.

        :param _input: tf.Tensor, input of this model.
        """
        super(Network, self).__init__()

        self.name = name

        self.deterministic_initialization = deterministic_initialization
        self._var_list_initial_values = []
        self._var_init_placeholder = None
        self._assign_int = []
        self._var_initializer_op = None

        self.Ws = []
        self.bs = []
        self.inp = [_input]
        self.out = None  # for convenience
        self.var_list = []

        self.active_gen = []
        self.active_gen_kwargs = []

        self.w = None
Project: RFHO    Author: lucfra    | project source | file source
def _get_base_variable_list(self):
        """
        This method checks that all the elements of var_list are legitimate (tf.Variables or MergedVariables)
        and returns the underlying tf.Variables.
        :return:
        """
        res = []
        for v in self._var_list:
            if isinstance(v, MergedVariable):
                res.extend(v._get_base_variable_list())
            elif isinstance(v, tf.Variable):
                res.append(v)
            else:
                raise ValueError('Unexpected element in var_list: %r; expected tf.Variable or MergedVariable' % v)
        return res
Project: RFHO    Author: lucfra    | project source | file source
def assign(self, value, use_locking=False):
        """
        Behaves like tf.Variable.assign, building assign ops for the underlying (original) Variables.

        :param value: rank-1 tensor. Assumed to have the same structure as the tensor contained in the object.
        :param use_locking: (optional) see `use_locking` in `tf.Variable.assign`
        :return: An op that groups the `tf.Variable.assign` ops for the underlying variables.
        """
        assign_ops = [
            wsr(v.assign(reshape(value), use_locking=use_locking)) for v, reshape in self.chunks_info_dict.items()
        ]
        return tf.group(*assign_ops)
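For comparison, the underlying tf.Variable.assign API that this wrapper builds on can be used directly as follows (plain TensorFlow, illustrative values):

import tensorflow as tf

v = tf.Variable(tf.zeros([4]))
assign_op = v.assign(tf.ones([4]), use_locking=False)

# Several assign ops can be bundled into one op, as MergedVariable.assign does.
grouped = tf.group(assign_op)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(grouped)
    print(sess.run(v))  # [1. 1. 1. 1.]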
Project: Tensorflow-Softmax-NER-RNNLM    Author: queue-han    | project source | file source
def add_model(self, input_data):
    """Adds a linear-layer plus a softmax transformation

    The core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the mathematical
    transformation effected is

    y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed. Also, make sure to use
          tf.name_scope to ensure that your name spaces are clean.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
          and biases b with zeros.

    Args:
      input_data: A tensor of shape (batch_size, n_features).
    Returns:
      out: A tensor of shape (batch_size, n_classes)
    """
    ### YOUR CODE HERE
    #raise NotImplementedError

    self.W = tf.Variable(tf.zeros([self.config.n_features, self.config.n_classes], dtype=tf.float32),
                         name="weight")
    # Bias has shape (n_classes,) so it broadcasts over the batch dimension.
    self.b = tf.Variable(tf.zeros([self.config.n_classes], dtype=tf.float32), name="bias")
    out = softmax(tf.matmul(input_data, self.W) + self.b)

    ### END YOUR CODE
    return out
Project: Named-Entity-Recognition    Author: AliceDudu    | project source | file source
def add_model(self, input_data):
    """Adds a linear-layer plus a softmax transformation

    The core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the mathematical
    transformation effected is

    y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed. Also, make sure to use
          tf.name_scope to ensure that your name spaces are clean.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
          and biases b with zeros.

    Args:
      input_data: A tensor of shape (batch_size, n_features).
    Returns:
      out: A tensor of shape (batch_size, n_classes)
    """
    ### YOUR CODE HERE
    n_features, n_classes = self.config.n_features, self.config.n_classes
    with tf.name_scope('softmax_linear'):
      weights = tf.Variable(
          tf.zeros([n_features, n_classes]),
          name='weights')
      biases = tf.Variable(tf.zeros([n_classes]),
                           name='biases')
      logits = tf.matmul(input_data, weights) + biases
      out = softmax(logits)
    ### END YOUR CODE
    return out
Project: cs224d    Author: kkihara    | project source | file source
def add_model(self, input_data):
    """Adds a linear-layer plus a softmax transformation

    The core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the mathematical
    transformation effected is

    y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed. Also, make sure to use
          tf.name_scope to ensure that your name spaces are clean.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
          and biases b with zeros.

    Args:
      input_data: A tensor of shape (batch_size, n_features).
    Returns:
      out: A tensor of shape (batch_size, n_classes)
    """
    ### YOUR CODE HERE
    with tf.name_scope('linear_layer'):
        W = tf.Variable(np.zeros((self.config.n_features,
                                  self.config.n_classes)).astype(np.float32))
        b = tf.Variable(np.zeros((self.config.n_classes,)).astype(np.float32))
        out = softmax(tf.matmul(input_data, W) + b)
    ### END YOUR CODE
    return out
Project: aboleth    Author: data61    | project source | file source
def gaus_posterior(dim, std0):
    """Initialise a posterior Gaussian distribution with a diagonal covariance.

    Even though this is initialised with a diagonal covariance, a full
    covariance will be learned, using a lower triangular Cholesky
    parameterisation.

    Parameters
    ----------
    dim : tuple or list
        the dimension of this distribution.
    std0 : float
        the initial (unoptimized) diagonal standard deviation of this
        distribution.

    Returns
    -------
    Q : tf.contrib.distributions.MultivariateNormalTriL
        the initialised posterior Gaussian object.

    Note
    ----
    This will create tf.Variables for the randomly initialised mean and covariance
    of the posterior. The mean is initialised from a Normal distribution with zero
    mean and ``std0`` standard deviation, and the (lower triangular of the)
    covariance is initialised from a gamma distribution with an alpha of ``std0``
    and a beta of 1.

    """
    o, i = dim

    # Optimize only values in lower triangular
    u, v = np.tril_indices(i)
    indices = (u * i + v)[:, np.newaxis]
    l0 = np.tile(np.eye(i), [o, 1, 1])[:, u, v].T
    l0 = l0 * tf.random_gamma(alpha=std0, shape=l0.shape, seed=next(seedgen))
    lflat = tf.Variable(l0, name="W_cov_q")
    Lt = tf.transpose(tf.scatter_nd(indices, lflat, shape=(i * i, o)))
    L = tf.reshape(Lt, (o, i, i))

    mu_0 = tf.random_normal((o, i), stddev=std0, seed=next(seedgen))
    mu = tf.Variable(mu_0, name="W_mu_q")
    Q = MultivariateNormalTriL(mu, L)
    return Q
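The lower-triangular bookkeeping above is compact but easy to misread; the standalone sketch below (illustrative sizes, not aboleth code) shows how the flat indices map back onto per-output (i, i) lower-triangular matrices.

import numpy as np
import tensorflow as tf

o, i = 2, 3                            # output and input dimensions (illustrative)
u, v = np.tril_indices(i)              # row/column indices of the lower triangle
indices = (u * i + v)[:, np.newaxis]   # positions within the flattened (i*i,) matrix

# One row per lower-triangular entry, one column per output unit.
lflat = tf.Variable(tf.ones([len(u), o]), name="W_cov_q")

# Scatter the flat entries back into (i*i, o), then reshape to (o, i, i).
Lt = tf.transpose(tf.scatter_nd(indices, lflat, shape=(i * i, o)))
L = tf.reshape(Lt, (o, i, i))          # one lower-triangular Cholesky factor per output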


#
# KL divergence calculations
#
Project: sonnet    Author: deepmind    | project source | file source
def get_normalized_variable_map(scope_or_module,
                                collection=tf.GraphKeys.GLOBAL_VARIABLES,
                                context=None,
                                group_sliced_variables=True):
  """Builds map of `tf.Variable`s in scope or module with normalized names.

  The names of the variables are normalized to remove the scope prefix.

  Args:
    scope_or_module: Scope or module to build map from.
    collection: Collection to restrict query to. By default this is
        `tf.GraphKeys.GLOBAL_VARIABLES`, which includes non-trainable variables
        such as moving averages.
    context: Scope or module, identical to or parent of `scope`. If given, this
        will be used as the stripped prefix. By default `None`, which means
        `context=scope`.
    group_sliced_variables: Boolean, if set to True, sliced variables are
       grouped together in the returned map; if set to False, each partition of
       a sliced variable is a separate (key, value) pair.

  Returns:
    Dictionary mapping normalized variable name to `tf.Variable`, or a list
        of `tf.Variables` if the variable is a sliced (partitioned) variable.

  Raises:
    ValueError: If `context` is given but is not a proper prefix of `scope`.
  """
  scope_name = get_variable_scope_name(scope_or_module)

  if context is None:
    context = scope_or_module

  prefix = get_variable_scope_name(context)
  prefix_length = len(prefix) + 1 if prefix else 0

  if not _is_scope_prefix(scope_name, prefix):
    raise ValueError("Scope '{}' is not prefixed by '{}'.".format(
        scope_name, prefix))

  variables = get_variables_in_scope(scope_name, collection)

  if not group_sliced_variables:
    single_vars = variables
    grouped_vars = dict()
  else:
    single_vars, grouped_vars = _get_sliced_variables(variables)

  var_map = {var.op.name[prefix_length:]: var for var in single_vars}
  for full_name, var_group in grouped_vars.items():
    name = full_name[prefix_length:]
    if name in var_map:
      raise ValueError("Mixing slices and non-slices with the same name: " +
                       str(name))
    var_map[name] = var_group
  return var_map
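As a concrete, hypothetical illustration of the normalization: a variable created as "net/linear/w" with context "net" ends up keyed as "linear/w" in the returned map. The snippet below reproduces just the prefix-stripping step with plain TensorFlow.

import tensorflow as tf

with tf.variable_scope("net"):
    with tf.variable_scope("linear"):
        w = tf.get_variable("w", shape=[3, 3])

prefix = "net"
prefix_length = len(prefix) + 1 if prefix else 0
print(w.op.name[prefix_length:])  # "linear/w"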
Project: board-yet    Author: cmcneil    | project source | file source
def build_training_graph(self, batch, labels):
        """Takes in the graph nodes representing a training batch and
        associated labels, and builds the forward training graph, 
        including the embedding itself. Returns nodes representing
        the logits for the positive examples, as well as the logits
        for associated negatives for negative sampling."""
        # We do this because word2vec initializes the weights this way
        init_width = 0.5 / self.embed_dim
        # The actual embedding:
        # The shape of the tensor is weird because we are going to use the
        # "embedding_lookup" function instead of just multipling with a 1-hot.
        emb = tf.Variable(tf.random_uniform([self.vocab_size, self.embed_dim],
                                            -init_width, init_width),
                          name="embedding")
        self.emb = emb

        # For training, we actually need to train a softmax classifier.
        # This tensor can be thought of as a complete softmax unit for
        # every possible game. (Each row is a set of weights)
        softmax_w = tf.Variable(tf.zeros([self.vocab_size, self.embed_dim]),
                                name="softmax_weights")
        softmax_b = tf.Variable(tf.zeros([self.vocab_size]),
                                name="softmax_bias")

        # Negative sampling for SGNS. We make the assumption of sparsity.
        # On average, randomly sampled games will be negatives.
        labels_reformat = tf.reshape(tf.cast(labels, dtype=tf.int64), [-1, 1])
        sampled_ids = tf.nn.fixed_unigram_candidate_sampler(
                true_classes=labels_reformat,
                num_true=1,
                num_sampled=self.num_negatives,
                unique=True,
                range_max=self.vocab_size,
                distortion=0.75,
                unigrams=self.total_item_counts)

        batch_embeds = tf.nn.embedding_lookup(emb, batch)
        # Lookup the softmax classifiers for the training batch.
        # I don't particularly like the use of "embedding_lookup",
        # because softmax_w etc. aren't technically embedding
        # matrices. This is apparently the canonical way to do it in TF though.
        batch_sm_w = tf.nn.embedding_lookup(softmax_w, labels)
        batch_sm_b = tf.nn.embedding_lookup(softmax_b, labels)

        # Lookup the softmax classifers for the negative samples.
        neg_sm_w = tf.nn.embedding_lookup(softmax_w, sampled_ids)
        neg_sm_b = tf.nn.embedding_lookup(softmax_b, sampled_ids)

        # Produces a tensor that represents the logits (the arg of the
        # exponential numerator in softmax) for each of the examples
        # in the training batch.
        batch_logits = (tf.reduce_sum(tf.multiply(batch_embeds, batch_sm_w), 1)
                        + batch_sm_b)
        neg_logits = (tf.matmul(batch_embeds, neg_sm_w, transpose_b=True) +
                      tf.reshape(neg_sm_b, [self.num_negatives]))

        return batch_logits, neg_logits
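The hand-rolled positive/negative logits above mirror what TensorFlow's built-in sampled-loss helpers compute internally. As a hedged sketch (illustrative names and shapes, not this project's code), the same classifier can be trained with tf.nn.sampled_softmax_loss:

import tensorflow as tf

vocab_size, embed_dim, num_negatives = 10000, 64, 25

batch_embeds = tf.placeholder(tf.float32, shape=(None, embed_dim))
labels = tf.placeholder(tf.int64, shape=(None, 1))

softmax_w = tf.Variable(tf.zeros([vocab_size, embed_dim]), name="softmax_weights")
softmax_b = tf.Variable(tf.zeros([vocab_size]), name="softmax_bias")

# Averages the softmax loss over one true class and `num_negatives` sampled negatives.
loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
    weights=softmax_w,
    biases=softmax_b,
    labels=labels,
    inputs=batch_embeds,
    num_sampled=num_negatives,
    num_classes=vocab_size))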