Python tensorflow module: dynamic_partition() code examples

We extracted the following 10 code examples from open-source Python projects to illustrate how to use tensorflow.dynamic_partition().

Project: traffic_video_analysis    Author: polltooh    | project source | file source
def triplet_loss(infer, labels, radius = 2.0):
    """
    Args:
        infer: features from the two branches, concatenated along axis 0
            (2 * batch_size rows)
        labels: 0 or 1 per pair, length batch_size
        radius: margin enforced on dissimilar pairs
    Return:
        loss: triplet loss
    """
    # Old-style tf.split(split_dim, num_split, value) from TF <= 0.12.
    feature_1, feature_2 = tf.split(0, 2, infer)

    feature_diff = tf.reduce_sum(tf.square(feature_1 - feature_2), 1)
    # Route squared distances by label: partition 0 = dissimilar, 1 = similar.
    feature_list = tf.dynamic_partition(feature_diff, labels, 2)

    pos_list = feature_list[1]
    neg_list = tf.maximum(0.0, radius * radius - feature_list[0])
    full_list = tf.concat(0, [pos_list, neg_list])
    loss = tf.reduce_mean(full_list)

    return loss
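The core move above is routing the per-pair squared distances by their 0/1 label. A minimal sketch of just that step, assuming TensorFlow 1.x graph mode (the values are illustrative only):

import tensorflow as tf

# Squared distances for four pairs; 1 = similar pair, 0 = dissimilar.
feature_diff = tf.constant([0.5, 3.0, 1.2, 4.1])
labels = tf.constant([1, 0, 1, 0])
neg_diff, pos_diff = tf.dynamic_partition(feature_diff, labels, 2)
# neg_diff -> [3.0, 4.1] (label 0), pos_diff -> [0.5, 1.2] (label 1)
with tf.Session() as sess:
    print(sess.run([neg_diff, pos_diff]))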
Project: traffic_video_analysis    Author: polltooh    | project source | file source
def huber_loss(infer, label, epsilon, layer_name):
    """
    Args:
        infer: predicted values
        label: ground-truth values
        epsilon: threshold between the quadratic and linear regions
        layer_name: variable scope name
    """
    with tf.variable_scope(layer_name):
        abs_diff = tf.abs(tf.sub(infer, label))  # tf.sub is the pre-1.0 name of tf.subtract
        # 1 where the error is small (quadratic region), 0 where it is large.
        index = tf.to_int32(abs_diff <= epsilon, name = 'partition_index')
        l1_part, l2_part = tf.dynamic_partition(abs_diff, index, 2)
        l1_part_loss = epsilon * (l1_part - 0.5 * epsilon)  # linear penalty for large errors
        l2_part_loss = 0.5 * tf.square(l2_part)             # quadratic penalty for small errors
        hloss = tf.reduce_mean(tf.concat(0, [l1_part_loss, l2_part_loss]),
                    name = 'huber_loss_sum')
    return hloss
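The same partition-by-predicate idea drives the Huber loss: a boolean comparison becomes the partition index, splitting the errors into the linear and quadratic regimes. A small standalone sketch, assuming TensorFlow 1.x:

import tensorflow as tf

abs_diff = tf.constant([0.3, 2.5, 0.9, 4.0])
epsilon = 1.0
index = tf.to_int32(abs_diff <= epsilon)  # 1 inside the quadratic region
linear, quadratic = tf.dynamic_partition(abs_diff, index, 2)
# linear    -> [2.5, 4.0]  (|diff| > epsilon)
# quadratic -> [0.3, 0.9]  (|diff| <= epsilon)
with tf.Session() as sess:
    print(sess.run([linear, quadratic]))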
Project: hart    Author: akosiorek    | project source | file source
def select_present(x, presence, batch_size=1, name='select_present'):
    with tf.variable_scope(name):
        presence = 1 - tf.to_int32(presence)  # invert mask: 0 = present, 1 = absent

        bs = x.get_shape()[0]
        if bs != None:  # bs is a tf.Dimension; the != comparison is intentional
            batch_size = int(bs)

        # Offset each row's partition ids by 2 * row index, so row i's present
        # elements land in partition 2i and its absent ones in partition 2i + 1.
        num_partitions = 2 * batch_size
        r = tf.range(0, num_partitions, 2)
        r.set_shape(tf.TensorShape(batch_size))
        r = broadcast_against(r, presence)

        presence += r

        # Concatenating the partitions groups each row's present elements first.
        selected = tf.dynamic_partition(x, presence, num_partitions)
        selected = tf.concat(axis=0, values=selected)
        selected = tf.reshape(selected, tf.shape(x))

    return selected
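The per-row offset makes one global dynamic_partition behave like an independent partition of every batch row. A toy demonstration of the same trick, assuming TensorFlow 1.x (broadcast_against is replaced here by explicit row offsets):

import tensorflow as tf

x = tf.constant([[10, 20, 30],
                 [40, 50, 60]])
presence = tf.constant([[1, 0, 1],
                        [0, 1, 1]])
row_offset = tf.constant([[0], [2]])      # 2 * row index
part = (1 - presence) + row_offset        # row i: present -> 2i, absent -> 2i + 1
selected = tf.dynamic_partition(x, part, 4)
result = tf.reshape(tf.concat(selected, axis=0), tf.shape(x))
# result -> [[10, 30, 20], [50, 60, 40]]: present elements first in each row
with tf.Session() as sess:
    print(sess.run(result))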
Project: lsdc    Author: febert    | project source | file source
def scatter_update(cls, factor, indices, values, sharding_func):
    """Helper function for doing sharded scatter update."""
    assert isinstance(factor, list)
    if len(factor) == 1:
      with ops.colocate_with(factor[0]):
        # TODO(agarwal): assign instead of scatter update for full batch update.
        return tf.scatter_update(factor[0], indices, values).op
    else:
      num_shards = len(factor)
      assignments, new_ids = sharding_func(indices)
      assert assignments is not None
      assignments = tf.cast(assignments, tf.int32)
      sharded_ids = tf.dynamic_partition(new_ids, assignments, num_shards)
      sharded_values = tf.dynamic_partition(values, assignments, num_shards)
      updates = []
      for i in xrange(num_shards):
        updates.append(tf.scatter_update(factor[i],
                                         sharded_ids[i],
                                         sharded_values[i]))
      return tf.group(*updates)
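Here dynamic_partition splits both the row ids and their values by shard assignment, so each shard variable receives exactly one scatter_update. A sketch with a hypothetical mod-based sharding_func, assuming TensorFlow 1.x:

import tensorflow as tf

num_shards = 2
shards = [tf.Variable(tf.zeros([5])) for _ in range(num_shards)]

indices = tf.constant([0, 5, 3, 8])
values = tf.constant([1.0, 2.0, 3.0, 4.0])
assignments = tf.cast(indices % num_shards, tf.int32)  # hypothetical mod sharding
new_ids = indices // num_shards                        # row index within the shard

sharded_ids = tf.dynamic_partition(new_ids, assignments, num_shards)
sharded_values = tf.dynamic_partition(values, assignments, num_shards)
update_op = tf.group(*[tf.scatter_update(shards[i], sharded_ids[i], sharded_values[i])
                       for i in range(num_shards)])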
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def split_apply_merge(inp, partitions, fns):
  """Split input according to partitions.  Pass results through fns and merge.

  Args:
    inp: the input vector
    partitions: tensor of same length as input vector, having values 0, 1
    fns: the two functions.

  Returns:
    the vector routed, where routed[i] = fns[partitions[i]](inp[i])
  """
  new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
  new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
  new_indices = tf.dynamic_partition(
      tf.range(0, inp.get_shape()[0]), partitions, len(fns))
  return tf.dynamic_stitch(new_indices, new_outputs)
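Partitioning tf.range alongside the data yields exactly the index lists dynamic_stitch needs to restore the original element order. A runnable round trip, assuming TensorFlow 1.x:

import tensorflow as tf

inp = tf.constant([1.0, 2.0, 3.0, 4.0])
partitions = tf.constant([0, 1, 0, 1])
fns = [lambda x: x * 10.0, lambda x: -x]

routed = tf.dynamic_partition(inp, partitions, len(fns))
outputs = [fn(x) for fn, x in zip(fns, routed)]
indices = tf.dynamic_partition(tf.range(tf.size(inp)), partitions, len(fns))
merged = tf.dynamic_stitch(indices, outputs)
# merged -> [10.0, -2.0, 30.0, -4.0]: each element routed through its fn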
Project: GPflow    Author: GPflow    | project source | file source
def _partition_and_stitch(self, args, func_name):
        """
        args is a list of tensors, to be passed to self.likelihoods.<func_name>

        args[-1] is the 'Y' argument, which contains the indexes to self.likelihoods.

        This function splits up the args using dynamic_partition, calls the
        relevant function on the likelihoods, and re-combines the result.
        """
        # get the index from Y
        Y = args[-1]
        ind = tf.gather(tf.transpose(Y), tf.shape(Y)[1]-1)  # ind = Y[:,-1]
        ind = tf.cast(ind, tf.int32)
        Y = tf.transpose(tf.gather(tf.transpose(Y), tf.range(0, tf.shape(Y)[1]-1)))  # Y = Y[:,:-1]
        args[-1] = Y

        # split up the arguments into chunks corresponding to the relevant likelihoods
        args = zip(*[tf.dynamic_partition(X, ind, self.num_likelihoods) for X in args])

        # apply the likelihood-function to each section of the data
        with params_as_tensors_for(self, convert=False):
            funcs = [getattr(lik, func_name) for lik in self.likelihood_list]
        results = [f(*args_i) for f, args_i in zip(funcs, args)]

        # stitch the results back together
        partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, self.num_likelihoods)
        results = tf.dynamic_stitch(partitions, results)

        return results
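The transpose/gather dance above just slices off the index column; with strided slicing it reads as Y[:, -1] and Y[:, :-1]. A condensed sketch of the split step, assuming TensorFlow 1.x and two hypothetical likelihoods:

import tensorflow as tf

# The last column of Y holds the likelihood index for each row.
Y = tf.constant([[0.5, 0.0],
                 [1.5, 1.0],
                 [0.7, 0.0]])
ind = tf.cast(Y[:, -1], tf.int32)   # equivalent to the transpose/gather above
data = Y[:, :-1]
chunks = tf.dynamic_partition(data, ind, 2)
# chunks[0] -> rows tagged 0, chunks[1] -> rows tagged 1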
Project: GPflow    Author: GPflow    | project source | file source
def __call__(self, X):
        ind = tf.gather(tf.transpose(X), tf.shape(X)[1]-1)  # ind = X[:,-1]
        ind = tf.cast(ind, tf.int32)
        X = tf.transpose(tf.gather(tf.transpose(X), tf.range(0, tf.shape(X)[1]-1)))  # X = X[:,:-1]

        # split up X into chunks corresponding to the relevant likelihoods
        x_list = tf.dynamic_partition(X, ind, self.num_meanfunctions)
        # apply the likelihood-function to each section of the data
        results = [m(x) for x, m in zip(x_list, self.meanfunction_list)]
        # stitch the results back together
        partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, self.num_meanfunctions)
        return tf.dynamic_stitch(partitions, results)
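Because `partitions` is tf.range split by the same indices, dynamic_stitch places each per-function result back at its source row, so output rows line up with input rows. A compact round trip, assuming TensorFlow 1.x (the stand-in mean functions are illustrative only):

import tensorflow as tf

X = tf.constant([[1.0, 0.0],
                 [2.0, 1.0],
                 [3.0, 0.0]])
ind = tf.cast(X[:, -1], tf.int32)
x_list = tf.dynamic_partition(X[:, :-1], ind, 2)
means = [lambda x: x * 0.0, lambda x: x + 1.0]  # stand-ins for mean functions
results = [m(x) for m, x in zip(means, x_list)]
order = tf.dynamic_partition(tf.range(tf.size(ind)), ind, 2)
stitched = tf.dynamic_stitch(order, results)    # rows back in the original order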
Project: tensorflow-extenteten    Author: raviqqe    | project source | file source
def dynamic_partition(data, partitions, num_partitions, name=None):
    # Apply tf.dynamic_partition row-wise across a batch. map_fn needs the
    # output dtypes spelled out, and every row must put the same number of
    # elements into each partition so the per-row results can be stacked.
    return tf.map_fn(
        lambda args: tf.dynamic_partition(*args, num_partitions, name=name),
        [data, partitions],
        dtype=[data.dtype] * num_partitions)
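A usage sketch for the wrapper above, assuming TensorFlow 1.x: it only works when every row contributes the same number of elements to each partition, since map_fn stacks the per-row results.

import tensorflow as tf

data = tf.constant([[1.0, 2.0, 3.0, 4.0],
                    [5.0, 6.0, 7.0, 8.0]])
parts = tf.constant([[0, 1, 0, 1],
                     [1, 0, 1, 0]])
out = dynamic_partition(data, parts, 2)
# out[0] -> [[1.0, 3.0], [6.0, 8.0]], out[1] -> [[2.0, 4.0], [5.0, 7.0]]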
Project: tf-example-models    Author: aakhundov    | project source | file source
def get_parameter_updaters(self, data, gamma_weighted, gamma_sum):
        tf_parameter_updaters = []
        for dim in range(self.dims):
            # Group the responsibilities by the categorical value in this
            # dimension and sum each group to get the new per-category mean.
            tf_partition = tf.dynamic_partition(gamma_weighted, data[0][:, dim], self.counts[dim])
            tf_new_means = tf.parallel_stack([tf.reduce_sum(p) for p in tf_partition])
            tf_parameter_updaters.append(self.tf_means[dim].assign(tf_new_means))

        return tf_parameter_updaters
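Here dynamic_partition acts as an unsorted group-by: responsibilities are grouped by a categorical feature value and summed per group. A toy version, assuming TensorFlow 1.x:

import tensorflow as tf

gamma_weighted = tf.constant([0.2, 0.5, 0.1, 0.2])  # per-point responsibilities
categories = tf.constant([0, 1, 0, 2])              # feature value per point
parts = tf.dynamic_partition(gamma_weighted, categories, 3)
new_means = tf.parallel_stack([tf.reduce_sum(p) for p in parts])
# new_means -> [0.3, 0.5, 0.2]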
Project: TerpreT    Author: 51alg    | project source | file source
def apply_factor(tensor, *args, **kwargs):
    scope = kwargs.pop("scope", "")     
    with tf.name_scope(scope):
        n_args = len(args)

        if n_args == 0:
            tensor, output_size, error_symbol = tensor
            return one_hot(tensor, output_size, scope=scope)
        else:
            tensor, args = slice_out_int_literals(tensor, list(args))
            args, is_batched = make_batch_consistent(args)
            tensor, output_size, error_symbol = tensor

            # handle the case where all arguments were int literals
            tensor_dim_sizes = [dim.value for dim in tensor.get_shape()]
            if not tensor_dim_sizes:
                return one_hot(tensor, output_size, scope=scope)

            # Each arg is batch size x arg dim. Add dimensions to enable broadcasting.
            for i, arg in enumerate(args):
                for j in range(len(args)):
                    if j == i: continue
                    args[i] = tf.expand_dims(args[i], j + 1)

            # compute joint before tensor is applied
            joint = 0
            for arg in args:
                joint = joint + arg

            # prepare for unsorted_segment_sum
            joint = tf.reshape(joint, (-1, np.prod(tensor_dim_sizes)))
            joint = tf.transpose(joint, [1, 0])  # |tensor| x batch_size

            flat_tensor = tf.reshape(tensor, [-1])
            if error_symbol is not None:
                to_logsumexp = tf.dynamic_partition(joint, flat_tensor, output_size + 1)
                del to_logsumexp[error_symbol]
            else:
                to_logsumexp = tf.dynamic_partition(joint, flat_tensor, output_size)

            # tf.pack is the pre-1.0 name of tf.stack
            result = tf.pack(
                [logsumexp(x, reduction_indices=0) for x in to_logsumexp])

            result = tf.transpose(result, [1, 0])
            if not is_batched: result = tf.squeeze(result)
            return result
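The final dynamic_partition groups the joint log-scores by the factor's output symbol so each group can be log-sum-exp'd into one marginal. A simplified sketch, assuming TensorFlow 1.x and using tf.reduce_logsumexp in place of the project's logsumexp helper:

import tensorflow as tf

joint = tf.constant([[0.1], [0.4], [0.2], [0.3]])  # |tensor| x batch_size
flat_tensor = tf.constant([0, 1, 1, 0])            # output symbol of each row
groups = tf.dynamic_partition(joint, flat_tensor, 2)
marginals = tf.stack([tf.reduce_logsumexp(g, axis=0) for g in groups])
# marginals[k] = logsumexp over the rows whose output symbol is k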