Python tensorflow.python.ops.init_ops module: ones_initializer() example source code

The following 12 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.init_ops.ones_initializer().
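
Before the project excerpts, here is a minimal sketch of the basic call pattern. The variable name and shape are illustrative rather than taken from any of the projects, and a TF 1.x-style graph session is assumed:

import tensorflow as tf
from tensorflow.python.ops import init_ops

# Create a variable and fill it with ones via the initializer object.
bias = tf.get_variable("bias", shape=[4],
                       initializer=init_ops.ones_initializer())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(bias))  # -> [1. 1. 1. 1.]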

Project: lsdc    Author: febert    | project source | file source
def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):
    ids = tf.contrib.layers.sparse_column_with_keys(
        "ids", ["marlo", "omar", "stringer"])
    ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
                                 indices=[[0, 0], [1, 0], [1, 1]],
                                 shape=[2, 2])
    weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
    weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
                                     indices=[[0, 0], [1, 0], [1, 1]],
                                     shape=[2, 2])
    features = {"ids": ids_tensor,
                "weights": weights_tensor}
    embeded_sparse = tf.contrib.layers.embedding_column(
        weighted_ids, 1, combiner="sum", initializer=init_ops.ones_initializer)
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [embeded_sparse])
    with self.test_session():
      tf.initialize_all_variables().run()
      tf.initialize_all_tables().run()
      # score: (sum of weights)
      self.assertAllEqual(output.eval(), [[10.], [50.]])
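
A note on the two call styles that appear in these excerpts: in early TensorFlow releases, init_ops.ones_initializer was itself the initializer function and was passed uncalled (initializer=init_ops.ones_initializer, as in the excerpt above); later releases made it a class that must be instantiated (init_ops.ones_initializer(), as in the next excerpt). The FindYourCandy excerpt near the end of this page bridges both forms with a try/except; a minimal sketch of that shim, assuming TypeError is what distinguishes the two:

from tensorflow.python.ops import init_ops

# Instantiating succeeds on newer TF; on older TF the bare function form
# raises TypeError when called without arguments, so pass it uncalled.
try:
    ones_init = init_ops.ones_initializer()
except TypeError:
    ones_init = init_ops.ones_initializer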
Project: lsdc    Author: febert    | project source | file source
def testEmbeddingColumnForDNN(self):
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[3, 2])
    features = {"wire": wire_tensor}
    embeded_sparse = tf.contrib.layers.embedding_column(
        hashed_sparse,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer())
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [embeded_sparse])
    with self.test_session():
      tf.global_variables_initializer().run()
      # score: (number of values)
      self.assertAllEqual(output.eval(), [[1.], [2.], [0.]])
Project: lsdc    Author: febert    | project source | file source
def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):
    ids = tf.contrib.layers.sparse_column_with_keys(
        "ids", ["marlo", "omar", "stringer"])
    ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
                                 indices=[[0, 0], [1, 0], [1, 1]],
                                 shape=[3, 2])
    weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
    weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
                                     indices=[[0, 0], [1, 0], [1, 1]],
                                     shape=[3, 2])
    features = {"ids": ids_tensor,
                "weights": weights_tensor}
    embeded_sparse = tf.contrib.layers.embedding_column(
        weighted_ids,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer())
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [embeded_sparse])
    with self.test_session():
      tf.global_variables_initializer().run()
      tf.initialize_all_tables().run()
      # score: (sum of weights)
      self.assertAllEqual(output.eval(), [[10.], [50.], [0.]])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testEmbeddingColumnForDNN(self):
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    features = {"wire": wire_tensor}
    embeded_sparse = feature_column.embedding_column(
        hashed_sparse,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer())
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      # score: (number of values)
      self.assertAllEqual(output.eval(), [[1.], [2.], [0.]])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testEmbeddingColumnWithMaxNormForDNN(self):
    hashed_sparse = feature_column.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=["omar", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    features = {"wire": wire_tensor}
    embedded_sparse = feature_column.embedding_column(
        hashed_sparse,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer(),
        max_norm=0.5)
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embedded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      # score: (number of values * 0.5)
      self.assertAllClose(output.eval(), [[0.5], [1.], [0.]])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testEmbeddingColumnWithWeightedSparseColumnForDNN(self):
    ids = feature_column.sparse_column_with_keys("ids",
                                                 ["marlo", "omar", "stringer"])
    ids_tensor = sparse_tensor.SparseTensor(
        values=["stringer", "stringer", "marlo"],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    weighted_ids = feature_column.weighted_sparse_column(ids, "weights")
    weights_tensor = sparse_tensor.SparseTensor(
        values=[10.0, 20.0, 30.0],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[3, 2])
    features = {"ids": ids_tensor, "weights": weights_tensor}
    embeded_sparse = feature_column.embedding_column(
        weighted_ids,
        1,
        combiner="sum",
        initializer=init_ops.ones_initializer())
    output = feature_column_ops.input_from_feature_columns(features,
                                                           [embeded_sparse])
    with self.test_session():
      variables_lib.global_variables_initializer().run()
      data_flow_ops.tables_initializer().run()
      # score: (sum of weights)
      self.assertAllEqual(output.eval(), [[10.], [50.], [0.]])
Project: lsdc    Author: febert    | project source | file source
def testEmbeddingColumnForDNN(self):
    hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
    wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
                                  indices=[[0, 0], [1, 0], [1, 1]],
                                  shape=[2, 2])
    features = {"wire": wire_tensor}
    embeded_sparse = tf.contrib.layers.embedding_column(
        hashed_sparse, 1, combiner="sum", initializer=init_ops.ones_initializer)
    output = tf.contrib.layers.input_from_feature_columns(features,
                                                          [embeded_sparse])
    with self.test_session():
      tf.initialize_all_variables().run()
      # score: (number of values)
      self.assertAllEqual(output.eval(), [[1.], [2.]])
Project: u8m_test    Author: hxkk    | project source | file source
def call(self, inputs, state, scope=None):
        with vs.variable_scope(scope or type(self).__name__):  # "GruRcnCell"
            with vs.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0.
                w_zrw = self._conv(inputs, self._num_outputs*3, self._ih_filter_h_length, self._ih_filter_w_length,
                                 self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WzrwConv")

                u_zr = self._conv(state, self._num_outputs*2, self._hh_filter_h_length, self._hh_filter_w_length, [1, 1, 1, 1],
                                 "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UzrConv")

                w_z, w_r, w = tf.split(value=w_zrw, num_or_size_splits=3, axis=3, name="w_split")
                u_z, u_r = tf.split(value=u_zr, num_or_size_splits=2, axis=3, name="u_split")

                z_bias = tf.get_variable(
                    name="z_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer()
                )
                z_gate = math_ops.sigmoid(tf.nn.bias_add(w_z + u_z, z_bias))

                r_bias = tf.get_variable(
                    name="r_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                r_gate = math_ops.sigmoid(tf.nn.bias_add(w_r + u_r, r_bias))

            with vs.variable_scope("Candidate"):
#                 w = self._conv(inputs, self._num_outputs, self._ih_filter_h_length, self._ih_filter_w_length,
#                                self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WConv")
                u = self._conv(r_gate * state, self._num_outputs, self._hh_filter_h_length, self._hh_filter_w_length,
                               [1, 1, 1, 1], "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UConv")
                c_bias = tf.get_variable(
                    name="c_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                c = math_ops.tanh(tf.nn.bias_add(w + u, c_bias))
            new_h = z_gate * state + (1 - z_gate) * c
        return new_h, new_h
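
For orientation, the recurrence this call method implements is the convolutional GRU update sketched below, written in terms of the code's variable roles ($*$ is convolution, $\odot$ the elementwise product; note all three gate biases start at 1.0 via ones_initializer):

$z_t = \sigma(W_z * x_t + U_z * h_{t-1} + b_z)$
$r_t = \sigma(W_r * x_t + U_r * h_{t-1} + b_r)$
$\tilde{h}_t = \tanh(W * x_t + U * (r_t \odot h_{t-1}) + b_c)$
$h_t = z_t \odot h_{t-1} + (1 - z_t) \odot \tilde{h}_t$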
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testInitializedVariableValue(self):
    with self.test_session() as sess:
      a = variables_lib2.model_variable(
          'a', [5], initializer=init_ops.ones_initializer())
      sess.run(variables_lib.global_variables_initializer())
      self.assertAllEqual(a.eval(), [1] * 5)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def testLSTMCell(self):
    # Run with all-0 weights, no padding.
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
    self.assertAllClose(m, [[0.]] * self._batch_size)
    self.assertAllClose(c, [[0.]] * self._batch_size)
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
    self.assertAllClose(m, [[.25]] * self._batch_size)
    self.assertAllClose(c, [[.5]] * self._batch_size)
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
    self.assertAllClose(m, [[.0]] * self._batch_size)
    self.assertAllClose(c, [[.0]] * self._batch_size)
    m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
    self.assertAllClose(m, [[.25]] * self._batch_size)
    self.assertAllClose(c, [[.5]] * self._batch_size)

    # Run with all-1 weights, no padding.
    for m_prev in [0., 1.]:
      for c_prev in [0., 1.]:
        m, c = self._RunLSTMCell('ones',
                                 init_ops.ones_initializer(), m_prev, c_prev,
                                 0.)
        self.assertAllClose(m, self._NextM(self._inputs, 1., m_prev, c_prev))
        self.assertAllClose(c, self._NextC(self._inputs, 1., m_prev, c_prev))

    # Run with random weights.
    for weight in np.random.rand(3):
      weight_tf = constant_op.constant(weight, dtypes.float32)
      random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)

      # No padding.
      for m_prev in [0., 1.]:
        for c_prev in [0., 1.]:
          m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 0.)
          self.assertAllClose(m,
                              self._NextM(self._inputs, weight, m_prev, c_prev))
          self.assertAllClose(c,
                              self._NextC(self._inputs, weight, m_prev, c_prev))

      # Set padding.
      for m_prev in [0., 1.]:
        for c_prev in [0., 1.]:
          m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 1.)
          self.assertAllClose(m, [[m_prev]] * self._batch_size)
          self.assertAllClose(c, [[c_prev]] * self._batch_size)
Project: FindYourCandy    Author: BrainPad    | project source | file source
def __init__(self, features_size, num_classes, for_predict=False, hidden_size=3):
        self.hidden_size = hidden_size
        self.num_classes = num_classes

        with tf.variable_scope('transfer'):
            self.features = tf.placeholder(tf.float32, (None, features_size), name='features')
            self.label_ids = tf.placeholder(tf.int32, (None,), name='label_ids')

            try:
                ones_initializer = init_ops.ones_initializer()
            except TypeError:
                ones_initializer = init_ops.ones_initializer

            hidden = tf.contrib.layers.fully_connected(
                self.features,
                hidden_size,
                activation_fn=tf.nn.relu,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                biases_initializer=ones_initializer,
                trainable=True
            )

            self.keep_prob = tf.placeholder(tf.float32)
            hidden_drop = tf.nn.dropout(hidden, self.keep_prob)

            logits = tf.contrib.layers.fully_connected(
                hidden_drop,
                num_classes,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                biases_initializer=ones_initializer,
                trainable=True
            )

        if not for_predict:
            # add loss operation if initializing for training
            one_hot = tf.one_hot(self.label_ids, num_classes, name='target')
            self.loss_op = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits, one_hot)
            )

        self.softmax_op = tf.nn.softmax(logits)
        self.saver = tf.train.Saver()

        if not for_predict:
            # add train operation and summary operation if initializing for training
            # Optimizer
            with tf.variable_scope('optimizer'):
                self.global_step = tf.Variable(0, name='global_step', trainable=False)
            # Summaries
            with tf.variable_scope('summaries'):
                tf.scalar_summary('in sample loss', self.loss_op)
                self.summary_op = tf.merge_all_summaries()
Project: u8m_test    Author: hxkk    | project source | file source
def __call__(self, inputs, state, scope=None):
        isp = inputs.get_shape().as_list()
        M, H, W, C = self.input_size  # M: number of merged inputs stacked along axis 1
        assert isp[-1] == M * H * W * C
        mergedInputs = tf.reshape(inputs, shape=(-1, M, H, W, C))
        inputs, prevState = tf.unstack(mergedInputs, axis=1, name="unstack")

        with vs.variable_scope(scope or type(self).__name__):  # "GruRcnCell"
            with vs.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0.
                w_zrw = self._conv(inputs, self._num_outputs*3, self._ih_filter_h_length, self._ih_filter_w_length,
                                 self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WzrwConv")

                u_zr = self._conv(state, self._num_outputs*2, self._hh_filter_h_length, self._hh_filter_w_length, [1, 1, 1, 1],
                                 "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UzrConv")

                prevU_zr = self._conv(prevState, self._num_outputs*2, self._hh_filter_h_length, self._hh_filter_w_length, [1, 1, 1, 1],
                                 "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="PrevUzrConv")

                w_z, w_r, w = tf.split(value=w_zrw, num_or_size_splits=3, axis=3, name="w_split")
                u_z, u_r = tf.split(value=u_zr, num_or_size_splits=2, axis=3, name="u_split")
                prevU_z, prevU_r = tf.split(value=prevU_zr, num_or_size_splits=2, axis=3, name="prevU_split")

                z_bias = tf.get_variable(
                    name="z_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer()
                )
                z_gate = math_ops.sigmoid(tf.nn.bias_add(w_z + u_z + prevU_z, z_bias))

                r_bias = tf.get_variable(
                    name="r_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                r_gate = math_ops.sigmoid(tf.nn.bias_add(w_r + u_r + prevU_r, r_bias))

            with vs.variable_scope("Candidate"):
#                 w = self._conv(inputs, self._num_outputs, self._ih_filter_h_length, self._ih_filter_w_length,
#                                self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WConv")
                u = self._conv(r_gate * state, self._num_outputs, self._hh_filter_h_length, self._hh_filter_w_length,
                               [1, 1, 1, 1], "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UConv")
                c_bias = tf.get_variable(
                    name="c_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                c = math_ops.tanh(tf.nn.bias_add(w + u, c_bias))
            new_h = z_gate * state + (1 - z_gate) * c
        return new_h, new_h