Python tensorflow.python.framework.ops module: device() source-code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.framework.ops.device().
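
Before the project excerpts, here is a minimal sketch of the core mechanic (our own illustration, assuming TF 1.x graph mode; it is not taken from any project below). ops.device() is a context manager that constrains where the ops created inside it are placed:

import tensorflow as tf
from tensorflow.python.framework import ops

with tf.Graph().as_default():
  with ops.device("/cpu:0"):  # ops created here are pinned to the CPU
    a = tf.constant([1.0, 2.0], name="a")
  with ops.device("/gpu:0"):  # requests GPU placement; enable soft placement
    b = a * 2.0               # in the session config to fall back to CPU
  print(a.device)  # /device:CPU:0
  print(b.device)  # /device:GPU:0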

Project: DeepNovo    Author: nh2tran
def _build_embedding_AAid(self, input_AAid):
    """TODO(nh2tran): docstring.

       Inputs:
         input_AAid: list of 2 tensors [batch_size].

       Outputs:
         embedding_AAid: list of 2 tensors [batch_size, embedding_size].
    """

    print("".join(["="] * 80)) # section-separating line
    print("ModelNetwork: _build_embedding_AAid()")

    scope = "embedding_rnn_seq2seq/embedding_rnn_decoder" # TODO(nh2tran): to change to "embedding_AAid"
    with tf.variable_scope(scope):

      with ops.device("/cpu:0"):
        embedding = tf.get_variable(
            name="embedding",
            shape=[self.vocab_size, self.embedding_size])

      embedding_AAid = [embedding_ops.embedding_lookup(embedding, x)
                        for x in input_AAid]

    return embedding_AAid
Project: deep-learning    Author: lbkchen
def inference_graph(self, input_data, data_spec=None):
    """Constructs a TF graph for evaluating a random forest.

    Args:
      input_data: A tensor or SparseTensor or placeholder for input data.
      data_spec: A list of tf.dtype values specifying the original types of
        each column.

    Returns:
      The last op in the random forest inference graph.
    """
    data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
    probabilities = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        tree_data = input_data
        if self.params.bagged_features:
          tree_data = self._bag_features(i, input_data)
        probabilities.append(self.trees[i].inference_graph(tree_data,
                                                           data_spec))
    with ops.device(self.device_assigner.get_device(0)):
      all_predict = array_ops.pack(probabilities)
      return math_ops.div(
          math_ops.reduce_sum(all_predict, 0), self.params.num_trees,
          name='probabilities')
Project: lsdc    Author: febert
def approximate_duality_gap(self):
    """Add operations to compute the approximate duality gap.

    Returns:
      An Operation that computes the approximate duality gap over all
      examples.
    """
    with name_scope('sdca/approximate_duality_gap'):
      _, values_list = self._hashtable.export_sharded()
      shard_sums = []
      for values in values_list:
        with ops.device(values.device):
          shard_sums.append(
              math_ops.reduce_sum(math_ops.cast(values, dtypes.float64), 0))
      summed_values = math_ops.add_n(shard_sums)

      primal_loss = summed_values[1]
      dual_loss = summed_values[2]
      example_weights = summed_values[3]
      # Note: we return NaN if there are no weights or all weights are 0, e.g.
      # if no examples have been processed.
      return (primal_loss + dual_loss + self._l1_loss() +
              (2.0 * self._l2_loss(self._symmetric_l2_regularization()))
             ) / example_weights
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      routing_probabilities = self.training_ops.k_feature_routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes,
          num_features_per_node=self.params.num_features_per_node,
          layer_num=0,
          random_seed=self.params.base_random_seed)

      output = array_ops.slice(
          routing_probabilities,
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
Project: lsdc    Author: febert
def soft_inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      path_probability, path = (
          self.training_ops.stochastic_hard_routing_function(
              data,
              self.tree_parameters,
              self.tree_thresholds,
              tree_depth=self.params.hybrid_tree_depth,
              random_seed=self.params.base_random_seed))

      output = array_ops.slice(
          self.training_ops.unpack_path(path, path_probability),
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = [layers.fully_connected(data, self.params.layer_size)]

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations.append(
            layers.fully_connected(
                nn_activations[-1],
                self.params.layer_size))

      nn_activations_tensor = array_ops.concat(
          1, nn_activations, name="flattened_nn_activations")

      return nn_activations_tensor
Project: lsdc    Author: febert
def add_remote_device(self, remote_device):
    """Requests that fed values are sent to `remote_device`."""
    local_value = self.get_fed_tensors()

    self._num_remote_feeds += 1

    with ops.device(None):  # Bypass any existing device() calls
      with ops.device(remote_device):
        remote_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                           dtypes=self._dtypes,
                                           shapes=self._shapes,
                                           name=self._shared_name,
                                           shared_name=self._shared_name)
        remote_enq_op = remote_q.enqueue(local_value)

    # Add a remote queue runner to feed the remote queue.
    self._add_remote_queue_runner(remote_q, [remote_enq_op])
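
The nested ops.device(None) above is the documented way to clear any enclosing device scope before pinning the queue to remote_device. A condensed sketch of just that mechanic (our illustration, reusing the constant_op import seen in other excerpts):

with ops.device("/cpu:0"):
  a = constant_op.constant(1)    # constrained to /cpu:0
  with ops.device(None):         # drops the enclosing /cpu:0 constraint
    b = constant_op.constant(2)  # unconstrained; the placer chooses freely
print(a.device)  # /device:CPU:0
print(b.device)  # '' (no constraint recorded)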
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      routing_probabilities = self.training_ops.k_feature_routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes,
          num_features_per_node=self.params.num_features_per_node,
          layer_num=0,
          random_seed=self.params.base_random_seed)

      output = array_ops.slice(
          routing_probabilities,
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
Project: lsdc    Author: febert
def soft_inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      path_probability, path = (
          self.training_ops.stochastic_hard_routing_function(
              data,
              self.tree_parameters,
              self.tree_thresholds,
              tree_depth=self.params.hybrid_tree_depth,
              random_seed=self.params.base_random_seed))

      output = array_ops.slice(
          self.training_ops.unpack_path(path, path_probability),
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = [layers.fully_connected(data, self.params.layer_size)]

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations.append(
            layers.fully_connected(
                nn_activations[-1],
                self.params.layer_size))

      nn_activations_tensor = array_ops.concat(
          1, nn_activations, name="flattened_nn_activations")

      return nn_activations_tensor
Project: DL-Benchmarks    Author: DL-Benchmarks
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._embedding:
          embedding = self._embedding
        else:
          if self._initializer:
            initializer = self._initializer
          elif vs.get_variable_scope().initializer:
            initializer = vs.get_variable_scope().initializer
          else:
            # Default initializer for embeddings should have variance=1.
            sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
          embedding = vs.get_variable("embedding", [self._embedding_classes,
                                                    self._cell.input_size],
                                      initializer=initializer)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
Project: Machine-Learning    Author: sfeng15
def _AddShardedSaveOps(self, filename_tensor, per_device):
    """Add ops to save the params per shard.

    Args:
      filename_tensor: String Tensor.
      per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as
        returned by _GroupByDevices().

    Returns:
      An op to save the variables.
    """
    num_shards = len(per_device)
    sharded_saves = []
    num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
    for shard, (device, vars_to_save) in enumerate(per_device):
      with ops.device(device):
        sharded_filename = self.sharded_filename(
            filename_tensor, shard, num_shards_tensor)
        sharded_saves.append(self._AddSaveOps(sharded_filename, vars_to_save))
    # Return the sharded name for the save path.
    with ops.control_dependencies([x.op for x in sharded_saves]):
      # pylint: disable=protected-access
      return gen_io_ops._sharded_filespec(filename_tensor, num_shards_tensor)
Project: Machine-Learning    Author: sfeng15
def _GroupByDevices(self, vars_to_save):
    """Group Variable tensor slices per device.

    TODO(touts): Make sure that all the devices found are on different
    job/replica/task/cpu|gpu.  It would be bad if 2 were on the same device.
    This can happen if the devices are unspecified.

    Args:
      vars_to_save: A list of BaseSaverBuilder.VarToSave objects.

    Returns:
      A list of (device_name, BaseSaverBuilder.VarToSave) tuples, sorted by
      ascending device_name.
    """
    per_device = collections.defaultdict(list)
    for var_to_save in vars_to_save:
      canonical_device = pydev.canonical_name(var_to_save.var.device)
      per_device[canonical_device].append(var_to_save)
    return sorted(per_device.items(), key=lambda t: t[0])
Project: diversity_based_attention    Author: PrekshaNema25
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
Project: ROLO    Author: Guanghan
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testDeviceFn(self):

    class DevFn(object):

      def __init__(self):
        self.counter = -1

      def __call__(self, op):
        self.counter += 1
        return '/cpu:%d' % self.counter

    with ops.Graph().as_default():
      with arg_scope([variables_lib2.model_variable], device=DevFn()):
        a = variables_lib2.model_variable('a', [5])
        b = variables_lib2.model_variable('b', [20])
        self.assertDeviceEqual(a.device, '/cpu:0')
        self.assertEqual(a.initial_value.op.colocation_groups(),
                         a.op.colocation_groups())
        self.assertDeviceEqual(b.device, '/cpu:1')
        self.assertEqual(b.initial_value.op.colocation_groups(),
                         b.op.colocation_groups())
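
The test above passes a device function through arg_scope; ops.device() accepts the same kind of callable directly, invoking it once per newly created op to obtain a device string. A minimal sketch of that pattern (our illustration; round_robin is a hypothetical helper):

def round_robin(num_cpus):
  """Hypothetical helper: spread successive ops across num_cpus CPU devices."""
  state = {"next": -1}
  def device_fn(op):  # called by the graph for each op created in the scope
    state["next"] += 1
    return "/cpu:%d" % (state["next"] % num_cpus)
  return device_fn

with ops.device(round_robin(2)):
  x = constant_op.constant(1.0)  # -> /cpu:0
  y = constant_op.constant(2.0)  # -> /cpu:1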
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _testSingleAllReduce(self, sess, np_type, nccl_fn, numpy_accumulation_fn):
    for devices in [['/gpu:0', '/gpu:0', '/gpu:0'], ['/gpu:0', '/gpu:0']]:
      shape = (3, 4)
      np_ans = None
      tensors = []
      for d in devices:
        with ops.device(d):
          t = ((np.random.random_sample(shape) - .5) * 1024).astype(np_type)
          if np_ans is None:
            np_ans = t
          else:
            np_ans = numpy_accumulation_fn(np_ans, t)
          tensors.append(array_ops.identity(t))

      all_reduce_tensors = nccl_fn(tensors)

      # Test shape inference.
      for r in all_reduce_tensors:
        self.assertEqual(shape, r.get_shape())

      # Test execution and results.
      nccl_results = sess.run(all_reduce_tensors)
      for r in nccl_results:
        self.assertAllClose(r, np_ans)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _apply_all_reduce(reduction_op, tensors):
  if not tensors:
    raise ValueError('Must pass >0 tensors to all reduce operations')
  shared_name = _get_shared_name()
  res = []
  for t in tensors:
    if not device.canonical_name(t.device):
      raise ValueError('Device assignment required for nccl collective ops')
    with ops.device(t.device):
      res.append(
          gen_nccl_ops.nccl_all_reduce(
              t,
              reduction=reduction_op,
              num_devices=len(tensors),
              shared_name=shared_name))
  return res
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0']
        },
        'task': {
            'type': run_config.TaskType.WORKER,
            'index': 3
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig()

    with ops.device(estimator._get_replica_device_setter(config)):
      v = variables_lib.Variable([1, 2])
      w = variables_lib.Variable([2, 1])
      a = v + w
    self.assertDeviceEqual('/job:ps/task:0', v.device)
    self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
    self.assertDeviceEqual('/job:ps/task:0', w.device)
    self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
    self.assertDeviceEqual('/job:worker/task:3', a.device)
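
The placement checked above comes from a replica device setter; the same policy is available outside Estimator by handing the setter to ops.device() yourself (a sketch, assuming TF 1.x's device_setter module and the variables_lib import used in the test):

from tensorflow.python.training import device_setter

with ops.device(device_setter.replica_device_setter(
    ps_tasks=1, worker_device="/job:worker/task:3")):
  v = variables_lib.Variable([1, 2])  # variables land on /job:ps/task:0
  a = v + v                           # compute stays on /job:worker/task:3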
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def _runSamplingBenchmark(self, name, create_distribution, use_gpu,
                            num_components, batch_size, num_features,
                            sample_size):
    config = config_pb2.ConfigProto()
    config.allow_soft_placement = True
    np.random.seed(127)
    with session.Session(config=config, graph=ops.Graph()) as sess:
      random_seed.set_random_seed(0)
      with ops.device("/gpu:0" if use_gpu else "/cpu:0"):
        mixture = create_distribution(
            num_components=num_components,
            batch_size=batch_size,
            num_features=num_features)
        sample_op = mixture.sample(sample_size).op
        sess.run(variables.global_variables_initializer())
        reported = self.run_op_benchmark(
            sess,
            sample_op,
            min_iters=10,
            name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d" %
                  (name, use_gpu, num_components, batch_size, num_features,
                   sample_size)))
        print("\t".join(["%s", "%d", "%d", "%d", "%d", "%g"]) %
              (use_gpu, num_components, batch_size, num_features, sample_size,
               reported["wall_time"]))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def benchmarkTfRNNLSTMBlockCellTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]

      with ops.Graph().as_default(), ops.device("/gpu:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units)  # pylint: disable=cell-var-from-loop

        multi_cell = core_rnn_cell_impl.MultiRNNCell(
            [cell() for _ in range(num_layers)])
        outputs, final_state = core_rnn.static_rnn(
            multi_cell, inputs, dtype=dtypes.float32)
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        gradients = gradients_impl.gradients([outputs, final_state],
                                             trainable_variables)
        training_op = control_flow_ops.group(*gradients)
        self._BenchmarkOp(training_op, "tf_rnn_lstm_block_cell %s %s" %
                          (config_name, self._GetConfigDesc(config)))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testDeadlock(self):
    # Builds a graph of the form:
    #  x -> y
    #       | \
    #       z -> w
    # where x and z are placed on the CPU and y and w are placed on the XLA
    # device. If y and w are clustered for compilation, then the graph will
    # deadlock since the clustered graph will contain a self-loop.
    with self.test_session() as sess:
      with ops.device(CPU_DEVICE):
        x = array_ops.placeholder(dtypes.float32, [2])
      with self.test_scope():
        y = x * 2
      with ops.device(CPU_DEVICE):
        z = y * y
      with self.test_scope():
        w = y + z
      result = sess.run(w, {x: [1.5, 0.5]})
    self.assertAllClose(result, [12., 2.], rtol=1e-3)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi
def testHostMemory(self):
    with self.test_session() as sess:
      x = array_ops.placeholder(dtypes.int32)
      with self.test_scope():
        y = x + 1
      with ops.device(CPU_DEVICE):
        # Place a computation on the CPU, so y and w cannot be merged into the
        # same JIT compilation.
        z = y * 2
      with self.test_scope():
        # Argument 'y' is a non-constant output of a previous cluster. Make sure
        # it is properly copied to host memory so it can be used as a
        # compile-time constant input for this cluster.
        w = array_ops.reshape(z, y)
      result = sess.run(w, {x: [1, 0]})
      expected = np.array([[4], [2]], dtype=np.int32)
      self.assertAllClose(expected, result, rtol=1e-3)
Project: youtube-8m    Author: wangheda
def _on_device(fn, device):
    """Build the subgraph defined by lambda `fn` on `device` if it's not None."""
    if device:
        with ops.device(device):
            return fn()
    else:
        return fn()
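
A hypothetical call site, to make the helper concrete (the queue arguments below are our own, not project code):

# Build the queue on a parameter server if a device is given; with
# device=None it inherits whatever device scope is already active.
queue = _on_device(
    lambda: data_flow_ops.FIFOQueue(10, dtypes.float32),
    "/job:ps/task:0")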

# pylint: disable=unused-argument
Project: DeepNovo    Author: nh2tran
def embed_labels(encoded_spectrum,
                 intensity_inputs_forward,
                 intensity_inputs_backward,
                 decoder_inputs_forward,
                 decoder_inputs_backward,
                 keep_conv,
                 keep_dense):
  """TODO(nh2tran): docstring."""

  with variable_scope.variable_scope("embedding_rnn_decoder"):
    with ops.device("/cpu:0"):
      embedding = variable_scope.get_variable(
          name="embedding",
          shape=[deepnovo_config.vocab_size, deepnovo_config.embedding_size])

    # nobi
    decoder_inputs_forward_emb = [embedding_ops.embedding_lookup(embedding, x)
                                  for x in decoder_inputs_forward]
    decoder_inputs_backward_emb = [embedding_ops.embedding_lookup(embedding, x)
                                   for x in decoder_inputs_backward]

    return (decode_spectrum(encoded_spectrum,
                            intensity_inputs_forward,
                            decoder_inputs_forward_emb,
                            keep_conv,
                            keep_dense,
                            scope="rnn_decoder_forward"),
            decode_spectrum(encoded_spectrum,
                            intensity_inputs_backward,
                            decoder_inputs_backward_emb,
                            keep_conv,
                            keep_dense,
                            scope="rnn_decoder_backward"))
Project: deep-learning    Author: lbkchen
def __init__(self, params, device_assigner, training=True,
               tree_variables_class=TreeTrainingVariables):
    self.variables = []
    for i in range(params.num_trees):
      with ops.device(device_assigner.get_device(i)):
        self.variables.append(tree_variables_class(params, i, training))
Project: deep-learning    Author: lbkchen
def get_device(self, unused_tree_num):
    if not self.cached:
      dummy = constant_op.constant(0)
      self.cached = dummy.device

    return self.cached
Project: deep-learning    Author: lbkchen
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(array_ops.pack(sizes))

  # pylint: disable=unused-argument
Project: deep-learning    Author: lbkchen
def get_stats(self, session):
    tree_stats = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        tree_stats.append(self.trees[i].get_stats(session))
    return ForestStats(tree_stats, self.params)
Project: lsdc    Author: febert
def variable(name, shape=None, dtype=None, initializer=None,
             regularizer=None, trainable=True, collections=None,
             caching_device=None, device=None):
  """Gets an existing variable with these parameters or creates a new one.

  Args:
    name: the name of the new or existing variable.
    shape: shape of the new or existing variable.
    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: initializer for the variable if one is created.
    regularizer: a (Tensor -> Tensor or None) function; the result of
        applying it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    collections: A list of collection names to which the Variable will be added.
      If None, it defaults to tf.GraphKeys.VARIABLES.
    caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.

  Returns:
    The created or existing variable.
  """
  collections = list(collections or [ops.GraphKeys.VARIABLES])

  # Remove duplicates
  collections = set(collections)
  with ops.device(device or ''):
    return variable_scope.get_variable(name, shape=shape, dtype=dtype,
                                       initializer=initializer,
                                       regularizer=regularizer,
                                       trainable=trainable,
                                       collections=collections,
                                       caching_device=caching_device)
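
A hypothetical call, to make the device argument concrete (our example, not project code):

# Pin the variable to the CPU; `device` may also be a callable, as with
# ops.device() itself.
weights = variable("weights", shape=[10, 10], device="/cpu:0")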
Project: lsdc    Author: febert
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
                   regularizer=None, trainable=True, collections=None,
                   caching_device=None, device=None):
  """Gets an existing model variable with these parameters or creates a new one.

  Args:
    name: the name of the new or existing variable.
    shape: shape of the new or existing variable.
    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: initializer for the variable if one is created.
    regularizer: a (Tensor -> Tensor or None) function; the result of
        applying it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    collections: A list of collection names to which the Variable will be added.
      Note that the variable is always also added to the `GraphKeys.VARIABLES`
      and `GraphKeys.MODEL_VARIABLES` collections.
    caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.

  Returns:
    The created or existing variable.
  """
  collections = list(collections or [])
  collections += [ops.GraphKeys.VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
  return variable(name, shape=shape, dtype=dtype,
                  initializer=initializer, regularizer=regularizer,
                  trainable=trainable, collections=collections,
                  caching_device=caching_device, device=device)
Project: lsdc    Author: febert
def __init__(self,
               num_tasks=0,
               job_name='ps',
               device_type='CPU',
               device_index=0):
    """Initialize VariableDeviceChooser.

    Usage:
      To use with 2 parameter servers:
        VariableDeviceChooser(2)

      To use without parameter servers:
        VariableDeviceChooser()
        VariableDeviceChooser(device_type='GPU') # For GPU placement

    Args:
      num_tasks: number of tasks.
      job_name: String, a name for the parameter server job.
      device_type: Optional device type string (e.g. "CPU" or "GPU")
      device_index: int.  Optional device index.  If left
        unspecified, device represents 'any' device_index.
    """
    self._job_name = job_name
    self._device_type = device_type
    self._device_index = device_index
    self._num_tasks = num_tasks
    self._next_task_id = 0
Project: lsdc    Author: febert
def _create_slots(self):
    # Make internal variables which have the updates before applying L1
    # regularization.
    self._slots = collections.defaultdict(list)
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for var in self._variables[name]:
        with ops.device(var.device):
          # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is
          # fixed
          self._slots['unshrinked_' + name].append(var_ops.Variable(
              array_ops.zeros_like(var.initialized_value(), dtypes.float32),
              name=var.op.name + '_unshrinked/SDCAOptimizer'))
Project: lsdc    Author: febert
def _l2_loss(self, l2):
    """Computes the (un-normalized) l2 loss of the model."""
    with name_scope('sdca/l2_loss'):
      sums = []
      for name in ['sparse_features_weights', 'dense_features_weights']:
        for weights in self._convert_n_to_tensor(self._variables[name]):
          with ops.device(weights.device):
            sums.append(
                math_ops.reduce_sum(
                    math_ops.square(math_ops.cast(weights, dtypes.float64))))
      sum_of_squares = math_ops.add_n(sums)
      # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
      return l2 * sum_of_squares / 2.0
Project: lsdc    Author: febert
def __init__(self, params, device_assigner, training=True,
               tree_variables_class=TreeTrainingVariables):
    self.variables = []
    for i in range(params.num_trees):
      with ops.device(device_assigner.get_device(i)):
        self.variables.append(tree_variables_class(params, i, training))
Project: lsdc    Author: febert
def get_device(self, unused_tree_num):
    if not self.cached:
      dummy = constant_op.constant(0)
      self.cached = dummy.device

    return self.cached
Project: lsdc    Author: febert
def average_size(self):
    """Constructs a TF graph for evaluating the average size of a forest.

    Returns:
      The average number of nodes over the trees.
    """
    sizes = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        sizes.append(self.trees[i].size())
    return math_ops.reduce_mean(math_ops.to_float(array_ops.pack(sizes)))

  # pylint: disable=unused-argument
Project: lsdc    Author: febert
def average_impurity(self):
    """Constructs a TF graph for evaluating the leaf impurity of a forest.

    Returns:
      The last op in the graph.
    """
    impurities = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        impurities.append(self.trees[i].average_impurity())
    return math_ops.reduce_mean(array_ops.pack(impurities))
Project: lsdc    Author: febert
def get_stats(self, session):
    tree_stats = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        tree_stats.append(self.trees[i].get_stats(session))
    return ForestStats(tree_stats, self.params)
Project: lsdc    Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      routing_probabilities = self.training_ops.routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes)

      output = array_ops.slice(
          routing_probabilities,
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
Project: lsdc    Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      path_probability, path = self.training_ops.hard_routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes,
          tree_depth=self.params.hybrid_tree_depth)

      output = array_ops.slice(
          self.training_ops.unpack_path(path, path_probability),
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output
Project: lsdc    Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='stochastic_hard_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='stochastic_hard_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc    Author: febert
def _define_vars(self, params, **kwargs):
    with ops.device(self.device_assigner.get_device(self.layer_num)):

      self.tree_parameters = variable_scope.get_variable(
          name='stochastic_soft_tree_parameters_%d' % self.layer_num,
          shape=[params.num_nodes, params.num_features],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))

      self.tree_thresholds = variable_scope.get_variable(
          name='stochastic_soft_tree_thresholds_%d' % self.layer_num,
          shape=[params.num_nodes],
          initializer=init_ops.truncated_normal_initializer(
              mean=params.weight_init_mean, stddev=params.weight_init_std))
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      routes = self.training_ops.routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes)

      leaf_routes = array_ops.slice(
          routes, [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return leaf_routes
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, self.params.layer_size)

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations = layers.fully_connected(nn_activations,
                                                self.params.layer_size)
      return nn_activations
Project: lsdc    Author: febert
def inference_graph(self, data):
    with ops.device(self.device_assigner.get_device(self.layer_num)):
      # Compute activations for the neural network.
      nn_activations = layers.fully_connected(data, 1)

      # There is always one activation per instance by definition, so squeeze
      # away the extra dimension.
      return array_ops.squeeze(nn_activations, squeeze_dims=[1])
Project: lsdc    Author: febert
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
                   regularizer=None, trainable=True, collections=None,
                   caching_device=None, device=None):
  """Gets an existing model variable with these parameters or creates a new one.

  Args:
    name: the name of the new or existing variable.
    shape: shape of the new or existing variable.
    dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: initializer for the variable if one is created.
    regularizer: a (Tensor -> Tensor or None) function; the result of
        applying it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: A list of collection names to which the Variable will be added.
      Note that the variable is always also added to the
      `GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
    caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.
    device: Optional device to place the variable. It can be a string or a
      function that is called to get the device for the variable.

  Returns:
    The created or existing variable.
  """
  collections = list(collections or [])
  collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
  return variable(name, shape=shape, dtype=dtype,
                  initializer=initializer, regularizer=regularizer,
                  trainable=trainable, collections=collections,
                  caching_device=caching_device, device=device)
Project: lsdc    Author: febert
def __init__(self,
               num_tasks=0,
               job_name='ps',
               device_type='CPU',
               device_index=0):
    """Initialize VariableDeviceChooser.

    Usage:
      To use with 2 parameter servers:
        VariableDeviceChooser(2)

      To use without parameter servers:
        VariableDeviceChooser()
        VariableDeviceChooser(device_type='GPU') # For GPU placement

    Args:
      num_tasks: number of tasks.
      job_name: String, a name for the parameter server job.
      device_type: Optional device type string (e.g. "CPU" or "GPU")
      device_index: int.  Optional device index.  If left
        unspecified, device represents 'any' device_index.
    """
    self._job_name = job_name
    self._device_type = device_type
    self._device_index = device_index
    self._num_tasks = num_tasks
    self._next_task_id = 0