Python tensorflow module: Tensors() code examples

We extracted the following 6 code examples from open-source Python projects to illustrate how tensorflow.Tensors() is used.

Project: kaggle_redefining_cancer_treatment | Author: jorgemf
def _map(self, example_serialized, features=None):
        """
        Maps an example_serialized read from the dataset into the final set of tf.Tensors
        to return to the model.

        Simple example:

        def _parse(line, features=None):
            a, b = [np.int32(x) for x in line.split()]
            return a, b

        t_input, t_output = tf.py_func(_parse, [line], [tf.int32, tf.int32],
                                       stateful=True, name='py_parse_example')
        t_output = tf.add(t_output, 1)

        return t_input, t_output

        :param example_serialized: the serialized example read from the dataset
        :param features: do not use this; it is deprecated after TensorFlow 1.2
        :return: a tuple of the tensors to return when get_next is called, usually (inputs, outputs)
        """
        pass
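_map is left abstract here (the body is only a docstring plus pass). A minimal sketch of what a concrete override could look like, assuming TFRecord input with hypothetical 'image' and 'label' features and the TensorFlow 1.x parsing API:

def _map(self, example_serialized, features=None):
    # Hypothetical feature spec: the real keys, shapes and dtypes depend on
    # how the TFRecord files were written.
    feature_spec = {
        'image': tf.FixedLenFeature([784], tf.float32),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    parsed = tf.parse_single_example(example_serialized, feature_spec)
    return parsed['image'], parsed['label']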
Project: RFHO | Author: lucfra
def var_list(self, mode=VlMode.RAW):
        """
        Get the chunks that define this variable.

        :param mode: (optional, default VlMode.RAW)
                     VlMode.RAW: simply returns var_list, which may contain tf.Variables
                     or MergedVariables
                     VlMode.BASE: returns a list of the tf.Variables that are the "base" variables for this
                     MergedVariable
                     VlMode.TENSOR: returns a list of tf.Variables or tf.Tensors from the MergedVariables
        :return: A list that may contain tf.Tensors, tf.Variables and/or MergedVariables
        """
        if mode == VlMode.RAW:
            return self._var_list
        elif mode == VlMode.BASE:
            return self._get_base_variable_list()
        elif mode == VlMode.TENSOR:
            return self._var_list_as_tensors()  # returns w as a single tensor plus the augmented copies
        else:
            raise NotImplementedError('mode %s does not exist' % mode)
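For illustration, a caller can pick whichever representation it needs; `merged` below stands for a hypothetical MergedVariable instance from RFHO:

raw_chunks = merged.var_list(VlMode.RAW)     # tf.Variables and/or MergedVariables, as stored
base_vars = merged.var_list(VlMode.BASE)     # flat list of the underlying "base" tf.Variables
as_tensors = merged.var_list(VlMode.TENSOR)  # tf.Tensor views of the chunks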
Project: tensorport-template | Author: tensorport
def _map(self, example_serialized, features=None):
        """
        Maps an example_serialized read from the dataset into the final set of tf.Tensors
        to return to the model.

        Simple example:

        def _parse(line, features=None):
            a, b = [np.int32(x) for x in line.split()]
            return a, b

        t_input, t_output = tf.py_func(_parse, [line], [tf.int32, tf.int32],
                                       stateful=True, name='py_parse_example')
        t_output = tf.add(t_output, 1)

        return t_input, t_output

        :param example_serialized: the serialized example read from the dataset
        :param features: do not use this; it is deprecated after TensorFlow 1.2
        :return: a tuple of the tensors to return when get_next is called, usually (inputs, outputs)
        """
        pass
Project: DocumentSegmentation | Author: SeguinBe
def vgg_16_fn(input_tensor: tf.Tensor, scope='vgg_16', blocks=5, weight_decay=0.0005) \
        -> (tf.Tensor, list):  # list of tf.Tensors (layers)
    intermediate_levels = []
    # intermediate_levels.append(input_tensor)
    with slim.arg_scope(nets.vgg.vgg_arg_scope(weight_decay=weight_decay)):
        with tf.variable_scope(scope, 'vgg_16', [input_tensor]) as sc:
            input_tensor = mean_substraction(input_tensor)
            end_points_collection = sc.original_name_scope + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with slim.arg_scope(
                    [layers.conv2d, layers.fully_connected, layers.max_pool2d],
                    outputs_collections=end_points_collection):
                net = layers.repeat(
                    input_tensor, 2, layers.conv2d, 64, [3, 3], scope='conv1')
                intermediate_levels.append(net)
                net = layers.max_pool2d(net, [2, 2], scope='pool1')
                if blocks >= 2:
                    net = layers.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool2')
                if blocks >= 3:
                    net = layers.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool3')
                if blocks >= 4:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool4')
                if blocks >= 5:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool5')

                return net, intermediate_levels
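A minimal usage sketch, assuming the module's own imports (tensorflow as tf, tf.contrib.slim as slim, tf.contrib.layers as layers) are in place and that the input is a batch of RGB images:

images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='images')
# With blocks=3 the returned net is the pool3 output and intermediate_levels
# contains the conv1, conv2 and conv3 outputs.
features, intermediate_levels = vgg_16_fn(images, blocks=3, weight_decay=1e-4)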
Project: dataset | Author: analysiscenter
def _filter_tensor(inputs, cond, *args):
    """ Create indixes and elements of inputs which consists for which cond is True.

    Parameters
    ----------
        inputs: tf.Tensor
            input tensor
        cond: callable or float
            condition used to choose elements. If float, elements greater than cond will be chosen
        *args: tf.Tensors
            tensors with the same shape as inputs; their corresponding elements will also be returned

    Returns
    -------
        indices: tf.Tensor
            indices of elements of inputs for which cond is True
        tf.Tensors:
            filtered inputs and the tensors from args.
    """
    with tf.variable_scope('filter_tensor'):
        if not callable(cond):
            callable_cond = lambda x: x > cond
        else:
            callable_cond = cond
        indices = tf.where(callable_cond(inputs))
        output = (indices, *[tf.gather_nd(x, indices) for x in [inputs, *args]])
    return output
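A short usage sketch in a TensorFlow 1.x session, keeping the elements of a made-up scores tensor above 0.5 together with the matching elements of a second tensor:

scores = tf.constant([0.2, 0.7, 0.9, 0.1])
labels = tf.constant([0, 1, 1, 0])

# indices of the kept elements, then the gathered scores and labels
indices, kept_scores, kept_labels = _filter_tensor(scores, 0.5, labels)

with tf.Session() as sess:
    print(sess.run([kept_scores, kept_labels]))  # approximately [0.7, 0.9] and [1, 1]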
Project: dataset | Author: analysiscenter
def output(self, inputs, ops=None, prefix=None, **kwargs):
        """ Add output operations to a model graph, like predictions, quality metrics, etc.

        Parameters
        ----------
        inputs : tf.Tensor or a sequence of tf.Tensors
            input tensors

        ops : a sequence of str
            operation names::
            - 'sigmoid' - add ``sigmoid(inputs)``
            - 'proba' - add ``softmax(inputs)``
            - 'labels' - add ``argmax(inputs)``
            - 'accuracy' - add ``mean(predicted_labels == true_labels)``

        prefix : a sequence of str
            a prefix for each input if there are multiple inputs

        Raises
        ------
        ValueError if the number of outputs does not equal the number of prefixes
        TypeError if inputs is not a Tensor or a sequence of Tensors
        """
        kwargs = self.fill_params('output', **kwargs)
        predictions_op = self.pop('predictions', kwargs, default=None)

        if ops is None:
            ops = []
        elif not isinstance(ops, (list, tuple)):
            ops = [ops]

        if not isinstance(inputs, (tuple, list)):
            inputs = [inputs]
            prefix = prefix or 'output'
            prefix = [prefix]

        if len(inputs) != len(prefix):
            raise ValueError('Each output in multiple output models should have its own prefix')

        for i, tensor in enumerate(inputs):
            if not isinstance(tensor, tf.Tensor):
                raise TypeError("Network output is expected to be a Tensor, but given {}".format(type(tensor)))

            current_prefix = prefix[i]
            if current_prefix:
                ctx = tf.variable_scope(current_prefix)
                ctx.__enter__()
            else:
                ctx = None
            attr_prefix = current_prefix + '_' if current_prefix else ''

            pred_prefix = '' if len(inputs) == 1 else attr_prefix
            self._add_output_op(tensor, predictions_op, 'predictions', pred_prefix, **kwargs)
            for oper in ops:
                self._add_output_op(tensor, oper, oper, attr_prefix, **kwargs)

            if ctx:
                ctx.__exit__(None, None, None)
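The named ops correspond to plain TensorFlow calls applied to each input tensor; for a logits tensor they roughly amount to the following (a sketch of the semantics, not the library's exact attribute wiring):

logits = tf.placeholder(tf.float32, shape=[None, 10])
true_labels = tf.placeholder(tf.int64, shape=[None])

sigmoid = tf.sigmoid(logits)              # 'sigmoid'
proba = tf.nn.softmax(logits)             # 'proba'
pred_labels = tf.argmax(logits, axis=-1)  # 'labels'
accuracy = tf.reduce_mean(                # 'accuracy'
    tf.cast(tf.equal(pred_labels, true_labels), tf.float32))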