Python tensorflow module: sparse_reorder() code examples

The following 7 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.sparse_reorder().
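tf.sparse_reorder() takes a SparseTensor whose indices may be in arbitrary order and returns an equivalent SparseTensor with indices sorted into canonical row-major order (values reordered to match), which many downstream sparse ops require. A minimal sketch (TF 1.x; the tensor values here are illustrative):

import tensorflow as tf

# Indices deliberately given out of canonical (row-major) order.
st = tf.SparseTensor(indices=[[1, 0], [0, 1]], values=[2.0, 1.0], dense_shape=[2, 2])
st = tf.sparse_reorder(st)  # indices -> [[0, 1], [1, 0]], values -> [1.0, 2.0]

with tf.Session() as sess:
    print(sess.run(st))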

Project: sdp    Author: tansey    | project source | file source
import itertools

import numpy as np
from scipy.sparse import coo_matrix

def get_sparse_penalty_matrix(num_classes):
    '''Creates a sparse graph-fused lasso penalty matrix (zeroth-order trend
    filtering) under the assumption that the class bins are arranged along an
    evenly spaced p-dimensional grid.'''
    bins = [np.arange(c) for c in num_classes]
    idx_map = {t: idx for idx, t in enumerate(itertools.product(*bins))}
    indices = []
    values = []
    rows = 0
    for idx1, t1 in enumerate(itertools.product(*bins)):
        for dim in range(len(t1)):
            if t1[dim] < (num_classes[dim] - 1):
                # t_offset is a project helper that shifts t1 one step along dim.
                t2 = t_offset(t1, dim, 1)
                idx2 = idx_map[t2]
                indices.append([rows, idx1])
                values.append(1)
                indices.append([rows, idx2])
                values.append(-1)
                rows += 1
    # tensorflow version
    #D_shape = [rows, np.prod(num_classes)]
    #return tf.sparse_reorder(tf.SparseTensor(indices=indices, values=values, shape=D_shape))
    # Use scipy's sparse libraries until tensorflow's sparse matrix multiplication is fully implemented
    D_shape = (rows, np.prod(num_classes))
    row_indices = [x for x,y in indices]
    col_indices = [y for x,y in indices]
    return coo_matrix((values, (row_indices, col_indices)), shape=D_shape)
Project: sdp    Author: tansey    | project source | file source
import tensorflow as tf

def scipy_sparse_coo_to_tensorflow_sparse(x):
    values = x.data
    indices = list(zip(x.row, x.col))
    shape = x.shape
    # scipy COO entries are not guaranteed to be row-major ordered, so reorder.
    return tf.sparse_reorder(tf.SparseTensor(indices=indices, values=values, dense_shape=shape))
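For illustration (a hypothetical composition, assuming the project's t_offset helper is in scope), the two sdp helpers chain naturally: build the penalty matrix in scipy, then convert it when a TF-side sparse tensor is needed:

D = get_sparse_penalty_matrix([3, 3])            # scipy coo_matrix of shape (rows, 9)
D_tf = scipy_sparse_coo_to_tensorflow_sparse(D)  # canonically ordered tf.SparseTensor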
Project: tensorflow_compact_bilinear_pooling    Author: ronghanghu    | project source | file source
import numpy as np
import tensorflow as tf

def _generate_sketch_matrix(rand_h, rand_s, output_dim):
    """
    Return a sparse matrix used for the tensor sketch operation in compact
    bilinear pooling.

    Args:
        rand_h: a 1D numpy array containing indices in the interval `[0, output_dim)`.
        rand_s: a 1D numpy array of 1s and -1s, with the same shape as `rand_h`.
        output_dim: the output dimension of compact bilinear pooling.

    Returns:
        a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
    """

    # Generate a sparse matrix for tensor count sketch
    rand_h = rand_h.astype(np.int64)
    rand_s = rand_s.astype(np.float32)
    assert rand_h.ndim == 1 and rand_s.ndim == 1 and len(rand_h) == len(rand_s)
    assert np.all(rand_h >= 0) and np.all(rand_h < output_dim)

    input_dim = len(rand_h)
    indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
                              rand_h[..., np.newaxis]), axis=1)
    # Reorder indices into canonical row-major order for downstream sparse ops.
    sparse_sketch_matrix = tf.sparse_reorder(
        tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
    return sparse_sketch_matrix
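A hedged usage sketch (the setup below is illustrative, not taken from the project): the returned matrix M is [input_dim, output_dim], and tf.sparse_tensor_dense_matmul requires the sparse operand first, so a batch x of shape [batch, input_dim] is sketched by computing (M^T x^T)^T via the adjoint flags:

input_dim, output_dim = 512, 2048         # hypothetical sizes
rand_h = np.random.randint(output_dim, size=input_dim)
rand_s = 2 * np.random.randint(2, size=input_dim) - 1
M = _generate_sketch_matrix(rand_h, rand_s, output_dim)

x = tf.placeholder(tf.float32, [None, input_dim])
y = tf.transpose(tf.sparse_tensor_dense_matmul(M, x, adjoint_a=True, adjoint_b=True))
# y has shape [batch, output_dim]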
Project: gcn_metric_learning    Author: sk1712    | project source | file source
def chebyshev5(self, x, L, Fout, K):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale the Laplacian and store it as a TF sparse tensor; copy so the shared L is not modified.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        # scipy's COO ordering is not guaranteed row-major; TF sparse ops need it.
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], 0)  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=False)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
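The loop above applies the Chebyshev recurrence T_k = 2 L T_{k-1} - T_{k-2} to build the K filter taps; the tf.sparse_reorder call matters because np.column_stack((L.row, L.col)) yields COO indices in whatever order scipy stores them. The conversion step can be isolated as a small self-contained helper (the function name is illustrative):

import numpy as np
import scipy.sparse
import tensorflow as tf

def laplacian_to_tf_sparse(L):
    """Convert a scipy sparse Laplacian to a canonically ordered tf.SparseTensor."""
    L = scipy.sparse.coo_matrix(L)
    indices = np.column_stack((L.row, L.col)).astype(np.int64)
    st = tf.SparseTensor(indices, L.data.astype(np.float32), L.shape)
    return tf.sparse_reorder(st)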
Project: gcn_metric_learning    Author: sk1712    | project source | file source
def chebyshev5(self, x, L, Fout, K, regularization=False):
        N, M, Fin = x.get_shape()
        N, M, Fin = int(N), int(M), int(Fin)
        # Rescale the Laplacian and store it as a TF sparse tensor; copy so the shared L is not modified.
        L = scipy.sparse.csr_matrix(L)
        L = graph.rescale_L(L, lmax=2)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], 0)  # K x M x Fin*N
        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3,1,2,0])  # N x M x Fin x K
        x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        W = self._weight_variable([Fin*K, Fout], regularization=regularization)
        x = tf.matmul(x, W)  # N*M x Fout
        return tf.reshape(x, [N, M, Fout])  # N x M x Fout
Project: tensorflow-litterbox    Author: rwightman    | project source | file source
import numpy as np
import tensorflow as tf

def _generate_sketch_matrix(rand_h, rand_s, output_dim):
    """
    Return a sparse matrix used for the tensor sketch operation in compact
    bilinear pooling.

    Args:
        rand_h: a 1D numpy array containing indices in the interval `[0, output_dim)`.
        rand_s: a 1D numpy array of 1s and -1s, with the same shape as `rand_h`.
        output_dim: the output dimension of compact bilinear pooling.

    Returns:
        a sparse matrix of shape [input_dim, output_dim] for tensor sketch.
    """

    # Generate a sparse matrix for tensor count sketch
    rand_h = rand_h.astype(np.int64)
    rand_s = rand_s.astype(np.float32)
    assert rand_h.ndim == 1 and rand_s.ndim == 1 and len(rand_h) == len(rand_s)
    assert np.all(rand_h >= 0) and np.all(rand_h < output_dim)

    input_dim = len(rand_h)
    indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],
                              rand_h[..., np.newaxis]), axis=1)
    sparse_sketch_matrix = tf.sparse_reorder(
        tf.SparseTensor(indices, rand_s, [input_dim, output_dim]))
    return sparse_sketch_matrix
Project: TensorGraph    Author: hycis    | project source | file source
def _train_fprop(self, state_below):
        idx, val = state_below
        X = tf.SparseTensor(tf.cast(idx, 'int64'), val,
                            dense_shape=[self.batchsize, self.prev_dim])
        # Indices may arrive unsorted; reorder into canonical row-major order.
        X_order = tf.sparse_reorder(X)
        XW = tf.sparse_tensor_dense_matmul(X_order, self.W, adjoint_a=False, adjoint_b=False)
        return tf.add(XW, self.b)
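A hedged sketch of driving this layer (the placeholder setup and the `layer` object are hypothetical; TensorGraph wires its layers differently): the method expects state_below to be an (indices, values) pair describing a sparse [batchsize, prev_dim] input:

idx = tf.placeholder(tf.int64, [None, 2])   # (row, col) index pairs
val = tf.placeholder(tf.float32, [None])    # one value per index pair
out = layer._train_fprop((idx, val))        # dense output, one row per batch item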