Python numpy module: tril_indices() example source code

We extracted the following 36 code examples from open-source Python projects to illustrate how to use numpy.tril_indices().
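
Before the project examples, here is a minimal sketch (not from any of the projects below) of what the function itself returns; expected outputs are shown in comments:

import numpy as np

# tril_indices(n, k) returns the row and column index arrays of the lower
# triangle of an (n, n) array; the offset k shifts the boundary diagonal.
rows, cols = np.tril_indices(4)       # main diagonal and below
print(rows)                           # [0 1 1 2 2 2 3 3 3 3]
print(cols)                           # [0 0 1 0 1 2 0 1 2 3]

A = np.arange(16).reshape(4, 4)
print(A[rows, cols])                  # the 10 lower-triangle elements of A

# A negative k excludes diagonals near the main one; k = -2 keeps only
# elements at least two rows below the diagonal:
print(np.tril_indices(4, -2))         # (array([2, 3, 3]), array([0, 0, 1]))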

Project: Master-Thesis    Author: AntoinePassemiers    | project source | file source
def get_metrics_per_contact_type(predicted_cmap, target_cmap):
    L = predicted_cmap.shape[0]
    short_range  = np.zeros((L, L), dtype=bool)
    medium_range = np.zeros((L, L), dtype=bool)
    long_range   = np.zeros((L, L), dtype=bool)
    more_than_6  = np.tril_indices(L, -6)
    more_than_12 = np.tril_indices(L, -12)
    more_than_24 = np.tril_indices(L, -24)
    short_range[more_than_6] = True
    short_range[more_than_12] = False
    medium_range[more_than_12] = True
    medium_range[more_than_24] = False
    long_range[more_than_24] = True
    short_range = np.logical_or(short_range, short_range.T)
    medium_range = np.logical_or(medium_range, medium_range.T)
    long_range = np.logical_or(long_range, long_range.T)
    short_range_metrics = get_metrics(predicted_cmap[short_range], target_cmap[short_range])
    medium_range_metrics = get_metrics(predicted_cmap[medium_range], target_cmap[medium_range])
    long_range_metrics = get_metrics(predicted_cmap[long_range], target_cmap[long_range])
    return short_range_metrics, medium_range_metrics, long_range_metrics
Project: brainiak    Author: brainiak    | project source | file source
def _chol_idx(self, n_C, rank):
        l_idx = np.tril_indices(n_C)
        if rank is not None:
            # The rank of covariance matrix is specified
            idx_rank = np.where(l_idx[1] < rank)
            l_idx = (l_idx[0][idx_rank], l_idx[1][idx_rank])
            logger.info('Using the rank specified by the user: '
                        '{}'.format(rank))
        else:
            rank = n_C
            # if not specified, we assume you want to
            # estimate a full rank matrix
            logger.warning('Please be aware that you did not specify the'
                           ' rank of the covariance matrix to estimate. '
                           'I will assume that the covariance matrix '
                           'shared among voxels is of full rank. '
                           'Rank = {}'.format(rank))
            logger.warning('Please be aware that estimating a matrix of '
                           'high rank can be very slow. '
                           'If you have a good reason to specify a rank '
                           'lower than the number of experiment conditions,'
                           ' do so.')
        return l_idx, rank
Project: GPflow    Author: GPflow    | project source | file source
def forward(self, x):
        """
        Transforms from the free state to the variable.
        Args:
            x: Free state vector. Must have length of `self.num_matrices` *
                triangular_number.

        Returns:
            Reconstructed variable.
        """
        L = self._validate_vector_length(len(x))
        matsize = int((L * 8 + 1) ** 0.5 * 0.5 - 0.5)
        xr = np.reshape(x, (self.num_matrices, -1))
        var = np.zeros((matsize, matsize, self.num_matrices), settings.float_type)
        for i in range(self.num_matrices):
            indices = np.tril_indices(matsize, 0)
            var[indices + (np.zeros(len(indices[0])).astype(int) + i,)] = xr[i, :]
        return var.squeeze() if self.squeeze else var
Project: GPflow    Author: GPflow    | project source | file source
def vec_to_tri(vectors, N):
    """
    Takes a D x M tensor `vectors' and maps it to a D x matrix_size x matrix_size
    tensor where the lower triangle of each matrix_size x matrix_size matrix is
    constructed by unpacking each M-vector.

    Native TensorFlow version of Custom Op by Mark van der Wilk.

    def int_shape(x):
        return list(map(int, x.get_shape()))

    D, M = int_shape(vectors)
    N = int( np.floor( 0.5 * np.sqrt( M * 8. + 1. ) - 0.5 ) )
    # Check M is a valid triangle number
    assert((N * (N + 1)) == (2 * M))
    """
    indices = list(zip(*np.tril_indices(N)))
    indices = tf.constant([list(i) for i in indices], dtype=tf.int64)

    def vec_to_tri_vector(vector):
        return tf.scatter_nd(indices=indices, shape=[N, N], updates=vector)

    return tf.map_fn(vec_to_tri_vector, vectors)
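
For reference, a numpy-only sketch of the same packing convention (a hypothetical helper, not part of GPflow):

import numpy as np

def vec_to_tri_np(vectors, N):
    # Unpack each row of a (D, M) array into the lower triangle of an
    # (N, N) matrix, following np.tril_indices ordering.
    D, M = vectors.shape
    assert N * (N + 1) == 2 * M  # M must be the N-th triangular number
    rows, cols = np.tril_indices(N)
    out = np.zeros((D, N, N), dtype=vectors.dtype)
    out[:, rows, cols] = vectors
    return out

# vec_to_tri_np(np.arange(1., 7.).reshape(1, 6), 3)[0] gives
# [[1. 0. 0.]
#  [2. 3. 0.]
#  [4. 5. 6.]]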
Project: bnpy    Author: bnpy    | project source | file source
def makeImgPatchPrototype(D, compID):
    ''' Create image patch prototype for specific component
        Returns
        --------
        Xprototype : sqrt(D) x sqrt(D) matrix
    '''
    # Create a "prototype" image patch of PxP pixels
    P = int(np.sqrt(D))
    Xprototype = np.zeros((P, P))
    if compID % 4 == 0:
        Xprototype[:P // 2] = 1.0
        Xprototype = np.rot90(Xprototype, compID // 4)
    if compID % 4 == 1:
        Xprototype[np.tril_indices(P)] = 1
        Xprototype = np.rot90(Xprototype, (compID - 1) // 4)
    if compID % 4 == 2:
        Xprototype[np.tril_indices(P, 2)] = 1
        Xprototype = np.rot90(Xprototype, (compID - 2) // 4)
    if compID % 4 == 3:
        Xprototype[np.tril_indices(P, -2)] = 1
        Xprototype = np.rot90(Xprototype, (compID - 3) // 4)
    return Xprototype
Project: Master-Thesis    Author: AntoinePassemiers    | project source | file source
def extract_patches(self, all_seqdata, with_targets=True, inplace=False):
        print("\tConverting dataset to Numpy array...")
        L0_X, L0_Y = list(), list()
        for seqdata in all_seqdata:
            if not inplace:
                seqdata = copy.deepcopy(seqdata)
            L = len(seqdata)
            tril_indices = np.tril_indices(L, -1) # Lower triangle without diagonal
            cpreds = seqdata.cpreds
            for cpred in cpreds:
                if cpred is not None:
                    assert(len(cpred.shape) == 2)
                    cpred_shape = cpred.shape
            # Create contact maps from file data
            print(len(cpreds))
            for i in range(len(cpreds)):
                if cpreds[i] is None:
                    cpreds[i] = np.full(cpred_shape, Params.DISTANCE_NAN, dtype=Params.FLOAT_DTYPE)
                else:
                    cpreds[i][np.isnan(cpreds[i])] = Params.DISTANCE_NAN
                cpreds[i] = np.asarray(cpreds[i][tril_indices], dtype=Params.FLOAT_DTYPE)
            cpreds = np.asarray(cpreds, dtype=Params.FLOAT_DTYPE).T
            L0_X.append(cpreds)
            if with_targets:
                contacts = distances_to_contacts(seqdata.distances)
                contacts = contacts[tril_indices]
                L0_Y.append(contacts)

        L0_X = np.concatenate(L0_X, axis=0)
        if with_targets:
            L0_Y = np.concatenate(L0_Y, axis=0)
        return (L0_X, L0_Y) if with_targets else L0_X
Project: Master-Thesis    Author: AntoinePassemiers    | project source | file source
def separation_under_k_aa(cmap):
    L = cmap.shape[0]
    mask = np.ones((L, L), dtype=bool)
    more_than_k = np.tril_indices(L, -Params.MIN_AA_SEPARATION)
    mask[more_than_k] = False
    mask = np.logical_and(mask, mask.T)
    return mask
Project: chainerrl    Author: chainer    | project source | file source
def _non_diagonal_idx_array(batch_size, n):
    idx_offsets = np.arange(
        start=0, stop=batch_size * n * n, step=n * n, dtype=np.int32).reshape(
        (batch_size, 1))
    idx = np.ravel_multi_index(
        np.tril_indices(n, -1), (n, n)).reshape((1, -1)).astype(np.int32)
    return cuda.to_gpu(idx + idx_offsets)
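
As a quick illustration of the index arithmetic above (values only, not project code):

import numpy as np

# For n = 3, the strictly-lower-triangle entries (1,0), (2,0) and (2,1)
# flatten (row-major) to positions 3, 6 and 7 inside each n*n block:
print(np.ravel_multi_index(np.tril_indices(3, -1), (3, 3)))  # [3 6 7]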
Project: chainerrl    Author: chainer    | project source | file source
def _get_batch_non_diagonal_cpu(array):
    batch_size, m, n = array.shape
    assert m == n
    rows, cols = np.tril_indices(n, -1)
    return array[:, rows, cols]
Project: chainerrl    Author: chainer    | project source | file source
def _set_batch_non_diagonal_cpu(array, non_diag_val):
    batch_size, m, n = array.shape
    assert m == n
    rows, cols = np.tril_indices(n, -1)
    array[:, rows, cols] = non_diag_val
Project: chainerrl    Author: chainer    | project source | file source
def check_forward(self, diag_data, non_diag_data):
        diag = chainer.Variable(diag_data)
        non_diag = chainer.Variable(non_diag_data)
        y = lower_triangular_matrix(diag, non_diag)

        correct_y = numpy.zeros(
            (self.batch_size, self.n, self.n), dtype=numpy.float32)

        tril_rows, tril_cols = numpy.tril_indices(self.n, -1)
        correct_y[:, tril_rows, tril_cols] = cuda.to_cpu(non_diag_data)

        diag_rows, diag_cols = numpy.diag_indices(self.n)
        correct_y[:, diag_rows, diag_cols] = cuda.to_cpu(diag_data)

        gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))
Project: semi-auto-anno    Author: moberweger    | project source | file source
def getImageDescriptors_HOG_cdist(self, all_emb, ref_emb, ref_mask):
        # unnormalized cosine distance for HOG
        dist = numpy.dot(all_emb, ref_emb.T)
        # normalize by length of query descriptor projected on reference
        norm = numpy.sqrt(numpy.dot(numpy.square(all_emb), ref_mask.T))
        dist /= norm
        dist[numpy.isinf(dist)] = 0.
        dist[numpy.isnan(dist)] = 0.

        # dist[numpy.triu_indices(dist.shape[0], 1)] = numpy.maximum(dist[numpy.triu_indices(dist.shape[0], 1)],
        #                                                            dist.T[numpy.triu_indices(dist.shape[0], 1)])
        # dist[numpy.tril_indices(dist.shape[0], -1)] = 0.
        # dist += dist.T

        return dist
Project: GDAL_Python3    Author: jdegene    | project source | file source
def mk_test(x, alpha = 0.05):

    n = len(x)  

    # calculate S    
    listMa = np.matrix(x)               # convert input List to 1D matrix
    subMa = np.sign(listMa.T - listMa)  # calculate all possible differences in matrix
                                        # with itself and save only sign of difference (-1,0,1)
    s = np.sum( subMa[np.tril_indices(n,-1)] ) # sum lower left triangle of matrix

    # calculate the unique data
    # return_counts=True returns a second array that is equivalent to tp in old version
    unique_x = np.unique(x, return_counts=True)
    g = len(unique_x[0])

    # calculate the var(s)
    if n == g: # there is no tie
        var_s = (n*(n-1)*(2*n+5))/18
    else: # there are some ties in data       
        tp = unique_x[1]        
        var_s = (n*(n-1)*(2*n+5) + np.sum(tp*(tp-1)*(2*tp+5)))/18

    if s>0:
        z = (s - 1)/np.sqrt(var_s)
    elif s == 0:
        z = 0
    elif s<0:
        z = (s + 1)/np.sqrt(var_s)

    # calculate the p_value
    p = 2*(1-scipy.stats.norm.cdf(abs(z))) # two tail test
    h = abs(z) > scipy.stats.norm.ppf(1-alpha/2) 


    return h, p
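
A usage sketch for the function above, on synthetic data (the trend and seed are illustrative assumptions):

import numpy as np
import scipy.stats

rng = np.random.RandomState(0)
x = 0.05 * np.arange(100) + rng.normal(size=100)  # upward trend plus noise
h, p = mk_test(x, alpha=0.05)
print(h, p)  # h is True when a significant monotonic trend is detected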


Project: GPflow    Author: GPflow    | project source | file source
def reference_inverse(self, matrices):
        # This is the inverse operation of the vec_to_tri op being tested.
        D, N, _ = matrices.shape
        M = (N * (N + 1)) // 2
        tril_indices = np.tril_indices(N)
        output = np.zeros((D, M))
        for vector_index in range(D):
            matrix = matrices[vector_index, :]
            output[vector_index, :] = matrix[tril_indices]
        return output
Project: GPflow    Author: GPflow    | project source | file source
def backward(self, y):
        """
        Transforms from the variable to the free state.
        Args:
            y: Variable representation.

        Returns:
            Free state.
        """
        N = int(np.sqrt(y.size / self.num_matrices))
        reshaped = np.reshape(y, (N, N, self.num_matrices))
        size = len(reshaped)
        triangular = reshaped[np.tril_indices(size, 0)].T
        return triangular
Project: pyMTL    Author: bibliolytic    | project source | file source
def unvech_kh(v, cols_stacked=True, norm_conserved=False):
    # Restore matrix dimension and add triangular
    v = v.flatten()
    d = int(0.5 * (np.sqrt(8 * len(v) + 1) - 1))
    X = np.empty((d, d))
    triu_idx_r = []
    triu_idx_c = []
    # Find appropriate indexes
    if cols_stacked:
        for c in range(0, d):
            for r in range(0, c+1):
                triu_idx_r.append(r)
                triu_idx_c.append(c)
    else:
        for r in range(0, d):
            for c in range(r, d):
                triu_idx_r.append(r)
                triu_idx_c.append(c)
    # Restore original matrix
    triu_idx = (triu_idx_r, triu_idx_c)
    X[triu_idx] = v
    X[np.tril_indices(d)] = X.T[np.tril_indices(d)]
    # Undo rescaling on off diagonal elements
    if norm_conserved:
        X[np.triu_indices(d, 1)] /= np.sqrt(2)
        X[np.tril_indices(d, -1)] /= np.sqrt(2)
    return X
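
A quick round trip for unvech_kh, assuming the default column-stacked packing (the matching vech helper is not shown, so the vector is built inline):

import numpy as np

X = np.array([[1., 2., 3.],
              [2., 4., 5.],
              [3., 5., 6.]])
# Column-stacked half-vectorization of the upper triangle:
v = np.array([X[r, c] for c in range(3) for r in range(c + 1)])
print(v)                             # [1. 2. 4. 3. 5. 6.]
print(np.allclose(unvech_kh(v), X))  # True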
Project: keras-rl    Author: matthiasplappert    | project source | file source
def test_naf_layer_full():
    batch_size = 2
    for nb_actions in (1, 3):
        # Construct single model with NAF as the only layer, hence it is fully deterministic
        # since no weights are used, which would be randomly initialized.
        L_flat_input = Input(shape=((nb_actions * nb_actions + nb_actions) // 2,))
        mu_input = Input(shape=(nb_actions,))
        action_input = Input(shape=(nb_actions,))
        x = NAFLayer(nb_actions, mode='full')([L_flat_input, mu_input, action_input])
        model = Model(inputs=[L_flat_input, mu_input, action_input], outputs=x)
        model.compile(loss='mse', optimizer='sgd')

        # Create random test data.
        L_flat = np.random.random((batch_size, (nb_actions * nb_actions + nb_actions) // 2)).astype('float32')
        mu = np.random.random((batch_size, nb_actions)).astype('float32')
        action = np.random.random((batch_size, nb_actions)).astype('float32')

        # Perform reference computations in numpy since these are much easier to verify.
        L = np.zeros((batch_size, nb_actions, nb_actions)).astype('float32')
        LT = np.copy(L)
        for l, l_T, l_flat in zip(L, LT, L_flat):
            l[np.tril_indices(nb_actions)] = l_flat
            l[np.diag_indices(nb_actions)] = np.exp(l[np.diag_indices(nb_actions)])
            l_T[:, :] = l.T
        P = np.array([np.dot(l, l_T) for l, l_T in zip(L, LT)]).astype('float32')
        A_ref = np.array([np.dot(np.dot(a - m, p), a - m) for a, m, p in zip(action, mu, P)]).astype('float32')
        A_ref *= -.5

        # Finally, compute the output of the net, which should be identical to the previously
        # computed reference.
        A_net = model.predict([L_flat, mu, action]).flatten()
        assert_allclose(A_net, A_ref, rtol=1e-5)
Project: odin    Author: imito    | project source | file source
def __init__(self, tv_dim, ndim, nmix):
    self.tv_dim = tv_dim
    self.ndim = ndim
    self.nmix = nmix
    self.itril = np.tril_indices(tv_dim)
    self.Sigma = np.empty((self.ndim * self.nmix, 1))
    self.T_iS = None  # np.empty((self.tv_dim, self.ndim * self.nmix))
    self.T_iS_Tt = None  # np.empty((self.nmix, self.tv_dim * (self.tv_dim+1)/2))
    self.Tm = np.empty((self.tv_dim, self.ndim * self.nmix))
    self.Im = np.eye(self.tv_dim)
Project: teneto    Author: wiheto    | project source | file source
def corrcoef_matrix(matrix):
    # Code originating from http://stackoverflow.com/a/24547964 by http://stackoverflow.com/users/2455058/jingchao

    r = np.corrcoef(matrix)
    rf = r[np.triu_indices(r.shape[0], 1)]
    df = matrix.shape[1] - 2
    ts = rf * rf * (df / (1 - rf * rf))
    pf = betai(0.5 * df, 0.5, df / (df + ts))
    p = np.zeros(shape=r.shape)
    p[np.triu_indices(p.shape[0], 1)] = pf
    p[np.tril_indices(p.shape[0], -1)] = pf
    p[np.diag_indices(p.shape[0])] = np.ones(p.shape[0])
    return r, p
Project: nept    Author: vandermeerlab    | project source | file source
def vector_from_array(array):
    """Get triangle of output in vector from a correlation-type array

    Parameters
    ----------
    array : np.array

    Returns
    -------
    vector (np.array)

    Notes
    -----
    Old Matlab code indexes by column (a.k.a. Fortran-style), so to get the
    indices of the top triangle, we have to do some reshaping.
    Otherwise, if the vector made up by rows is OK, then simply:
    triangle = np.triu_indices(array.size, k=1), out = array[triangle]
    """
    triangle_lower = np.tril_indices(array.shape[0], k=-1)
    flatten_idx = np.arange(array.size).reshape(array.shape)[triangle_lower]
    triangle = np.unravel_index(flatten_idx, array.shape, order='F')

    # triangle = np.triu_indices(array.size, k=1)
    # out = array[triangle]

    return array[triangle]
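
A small sketch of the ordering difference the notes describe (a 4 x 4 example is the smallest where the two walks disagree):

import numpy as np

a = np.arange(16).reshape(4, 4)
# Row-major (C-style) walk of the strict upper triangle:
print(a[np.triu_indices(4, k=1)])  # [ 1  2  3  6  7 11]
# vector_from_array visits the same entries column by column (Fortran-style):
print(vector_from_array(a))        # [ 1  2  6  3  7 11]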
Project: reinforcement-learning    Author: amreis    | project source | file source
def feature_extractor_2(state, action, normalize=True):
    # features are
    # - bias
    # - the elements of the state vector
    # - all second order terms (mixed and pure)
    # - sines of every first and second order term
    # - cosines of every first and second order term
    # all of that times two (i.e. for each of the actions)
    s_prime = np.r_[1, state] # dim(state) + 1
    # dim(state) (dim(state) + 1) / 2
    quadratic = np.outer(s_prime, s_prime)[np.tril_indices(s_prime.shape[0])].reshape(-1)
    # (dim(state) (dim(state) + 1) / 2) - 1
    sines = np.sin(quadratic[1:])
    cosines = np.cos(quadratic[1:])
    # dim(state)(dim(state) + 1) - 2 + dim(state)(dim(state) + 1) / 2
    state_feats = np.r_[quadratic, sines, cosines]
    # dim(state_feats) * 2
    features = np.outer(state_feats, np.array([0, 1]) == action).T.reshape(-1)
    if normalize:
        # normalize everything but the bias.
        norm = features / (np.linalg.norm(features))
        norm[0] = int(action == 0) * 1.0
        norm[state_feats.shape[0]] = int(action == 1) * 1.0
        return norm
    else:
        return features
Project: bnpy    Author: bnpy    | project source | file source
def _get_lowTriIDs(K):
  if K in lowTriIDsDict:
    return lowTriIDsDict[K]
  else:
    ltIDs = np.tril_indices(K)
    lowTriIDsDict[K] = ltIDs
    return ltIDs
Project: bnpy    Author: bnpy    | project source | file source
def _scorematrix2rankedlist_greedy(M, nPairs, doKeepZeros=False):
    ''' Return the nPairs highest-ranked pairs in score matrix M

        Args
        -------
          M : score matrix, K x K
              should have only entries kA,kB where kA <= kB

        Returns
        --------
          aList : list of integer ids for rows of M
          bList : list of integer ids for cols of M

        Example
        ---------
        _scorematrix2rankedlist_greedy(np.array([[0, 2, 3],
                                                 [0, 0, 1],
                                                 [0, 0, 0]]), 3)
        >> [(0, 2), (0, 1), (1, 2)]
    '''
    M = M.copy()
    M[np.tril_indices(M.shape[0])] = - np.inf
    Mflat = M.flatten()
    sortIDs = np.argsort(-1 * Mflat)
    # Remove any entries that are -Inf
    sortIDs = sortIDs[Mflat[sortIDs] != -np.inf]
    if not doKeepZeros:
        # Remove any entries that are zero
        sortIDs = sortIDs[Mflat[sortIDs] != 0]
    bestrs, bestcs = np.unravel_index(sortIDs, M.shape)
    return bestrs[:nPairs].tolist(), bestcs[:nPairs].tolist()
Project: decoding_challenge_cortana_2016_3rd    Author: kingjr    | project source | file source
def _tril_indices(n, k=0):
    """Replacement for tril_indices that is provided for numpy >= 1.4"""
    mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
    indices = np.where(mask)

    return indices
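
A sanity check (not from the project) that the fallback agrees with numpy's own implementation:

import numpy as np

for n in (1, 4, 7):
    for k in (-2, 0, 1):
        ours = _tril_indices(n, k)
        ref = np.tril_indices(n, k)
        assert np.array_equal(ours[0], ref[0])
        assert np.array_equal(ours[1], ref[1])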
Project: Visualization    Author: nwrush    | project source | file source
def _get_strength_matrix(self):
        assert self.pmi.shape == self.ts_correlation.shape
        assert is_square(self.pmi)

        a = np.multiply(self.pmi, self.ts_correlation)

        lower_indexes = np.tril_indices(self.pmi.shape[0])
        a[lower_indexes] = np.nan
        return a
Project: neurotools    Author: michaelerule    | project source | file source
def tril_elements(M):
    '''
    Somewhat like matlab's "diag" function, but for lower triangular matrices

    tril_elements(randn(D*(D+1)//2))
    '''
    if len(M.shape)==2:
        # M is a matrix
        if not M.shape[0]==M.shape[1]:
            raise ValueError("Extracting lower triangle elements supported only on square arrays")
        # Extract lower triangular elements
        i = np.tril_indices(M.shape[0])
        return M[i]
    if len(M.shape)==1:
        # M is a vector
        # N(N+1)/2 = K
        # N(N+1) = 2K
        # NN+N = 2K
        # NN+N-2K=0
        # A x^2 + Bx + C
        # -1 +- sqrt(1-4*1*(-2K))
        # -----------------------
        #           2
        # 
        # (sqrt(1+8*K)-1)/2
        K = M.shape[0]
        N = (np.sqrt(1+8*K)-1)/2
        if N!=round(N):
            raise ValueError('Cannot pack %d elements into a square triangular matrix'%K)
        N = int(N)
        result = np.zeros((N,N))
        result[np.tril_indices(N)] = M
        return result
    raise ValueError("Must be 2D matrix or 1D vector")
Project: QScode    Author: PierreHao    | project source | file source
def givens_rotation(A):
    """Perform QR decomposition of matrix A using Givens rotation."""
    (num_rows, num_cols) = np.shape(A)

    # Initialize orthogonal matrix Q and upper triangular matrix R.
    Q = np.identity(num_rows)
    R = np.copy(A)

    # Iterate over lower triangular matrix.
    (rows, cols) = np.tril_indices(num_rows, -1, num_cols)
    for (row, col) in zip(rows, cols):

        # Compute Givens rotation matrix and
        # zero-out lower triangular matrix entries.
        if R[row, col] != 0:
            (c, s) = _givens_rotation_matrix_entries(R[col, col], R[row, col])

            G = np.identity(num_rows)
            G[[col, row], [col, row]] = c
            G[row, col] = s
            G[col, row] = -s

            R = np.dot(G, R)
            Q = np.dot(Q, G.T)

    return (Q, R)
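
A usage sketch, assuming the unshown _givens_rotation_matrix_entries helper returns the standard (c, s) pair that zeroes R[row, col]:

import numpy as np

A = np.random.RandomState(0).rand(4, 4)
Q, R = givens_rotation(A)
print(np.allclose(Q @ R, A))             # QR reproduces A
print(np.allclose(Q.T @ Q, np.eye(4)))   # Q is orthogonal
print(np.allclose(np.tril(R, -1), 0.))   # R is upper triangular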
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | project source | file source
def _fill_lower_triangular(self, x):
    """Numpy implementation of `fill_lower_triangular`."""
    x = np.asarray(x)
    d = x.shape[-1]
    # d = n(n+1)/2 implies n is:
    n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
    ids = np.tril_indices(n)
    y = np.zeros(list(x.shape[:-1]) + [n, n], dtype=x.dtype)
    y[..., ids[0], ids[1]] = x
    return y
Project: search-MjoLniR    Author: wikimedia    | project source | file source
def _binary_sim(matrix):
    """Compute a jaccard similarity matrix.

    Vecorization based on: https://stackoverflow.com/a/40579567

    Parameters
    ----------
    matrix : np.array

    Returns
    -------
    np.array
        matrix of shape (n_rows, n_rows) giving the similarity
        between rows of the input matrix.
    """
    # Generate the indices of the lower triangle of our result matrix.
    # The diagonal is offset by -1 because the identity in a similarity
    # matrix is always 1.
    r, c = np.tril_indices(matrix.shape[0], -1)

    # Particularly large groups can blow out memory usage. Chunk the calculation
    # into steps that require no more than ~100MB of memory at a time.
    # We have 4 2d intermediate arrays in memory at a given time, plus the
    # input and output:
    #  p1 = max_rows * matrix.shape[1]
    #  p2 = max_rows * matrix.shape[1]
    #  intersection = max_rows * matrix.shape[1] * 4
    #  union = max_rows * matrix.shape[1] * 8
    # This adds up to:
    #  memory usage = max_rows * matrix.shape[1] * 14
    mem_limit = 100 * pow(2, 20)
    max_rows = mem_limit // (14 * matrix.shape[1])
    out = np.eye(matrix.shape[0])
    for c_batch, r_batch in _batch(c, r, max_rows):
        # Use those indices to build two matrices that contains all
        # the rows we need to do a similarity comparison on
        p1 = matrix[c_batch]
        p2 = matrix[r_batch]
        # Run the main jaccard calculation
        intersection = np.logical_and(p1, p2).sum(1)
        union = np.logical_or(p1, p2).sum(1).astype(np.float64)
        # Build the result matrix with 1's on the diagonal
        # Insert the result of our similarity calculation at their original indices
        out[c_batch, r_batch] = intersection / union
    # Above only populated half of the matrix, the mirrored diagonal contains
    # only zeros. Fix that up by transposing. Adding the transposed matrix double
    # counts the diagonal so subtract that back out. We could skip this step and
    # leave half the matrix empty, but it takes a fraction of a ms to be correct
    # even on mid-sized inputs (~200 queries).
    return out + out.T - np.diag(np.diag(out))
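
The snippet relies on a _batch helper that is not shown here; a plausible minimal version (hypothetical, not the project's code) would chunk the index arrays so that no step exceeds max_rows pairs:

def _batch(c, r, max_rows):
    # Hypothetical stand-in for the unshown helper: yield aligned chunks
    # of the column/row index arrays, at most max_rows entries at a time.
    step = max(1, int(max_rows))
    for start in range(0, len(c), step):
        yield c[start:start + step], r[start:start + step]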
Project: aboleth    Author: data61    | project source | file source
def gaus_posterior(dim, std0):
    """Initialise a posterior Gaussian distribution with a diagonal covariance.

    Even though this is initialised with a diagonal covariance, a full
    covariance will be learned, using a lower triangular Cholesky
    parameterisation.

    Parameters
    ----------
    dim : tuple or list
        the dimension of this distribution.
    std0 : float
        the initial (unoptimized) diagonal standard deviation of this
        distribution.

    Returns
    -------
    Q : tf.contrib.distributions.MultivariateNormalTriL
        the initialised posterior Gaussian object.

    Note
    ----
    This will make tf.Variables on the randomly initialised mean and covariance
    of the posterior. The initialisation of the mean is from a Normal with zero
    mean, and ``std0`` standard deviation, and the initialisation of the (lower
    triangular of the) covariance is from a gamma distribution with an alpha of
    ``std0`` and a beta of 1.

    """
    o, i = dim

    # Optimize only values in lower triangular
    u, v = np.tril_indices(i)
    indices = (u * i + v)[:, np.newaxis]
    l0 = np.tile(np.eye(i), [o, 1, 1])[:, u, v].T
    l0 = l0 * tf.random_gamma(alpha=std0, shape=l0.shape, seed=next(seedgen))
    lflat = tf.Variable(l0, name="W_cov_q")
    Lt = tf.transpose(tf.scatter_nd(indices, lflat, shape=(i * i, o)))
    L = tf.reshape(Lt, (o, i, i))

    mu_0 = tf.random_normal((o, i), stddev=std0, seed=next(seedgen))
    mu = tf.Variable(mu_0, name="W_mu_q")
    Q = MultivariateNormalTriL(mu, L)
    return Q


#
# KL divergence calculations
#
Project: tadtool    Author: vaquerizaslab    | project source | file source
def impute_missing_bins(hic_matrix, regions=None, per_chromosome=True, stat=np.ma.mean):
    """
    Impute missing contacts in a Hi-C matrix.

    For inter-chromosomal data uses the mean of all inter-chromosomal contacts,
    for intra-chromosomal data uses the mean of intra-chromosomal counts at the corresponding diagonal.

    :param hic_matrix: A square numpy array
    :param regions: A list of :class:`~GenomicRegion`s - if omitted, will create a dummy list
    :param per_chromosome: Do imputation on a per-chromosome basis (recommended)
    :param stat: The aggregation statistic to be used for imputation, defaults to the mean.
    """
    if regions is None:
        regions = []
        for i in range(hic_matrix.shape[0]):
            regions.append(GenomicRegion(chromosome='', start=i, end=i))

    chr_bins = dict()
    for i, region in enumerate(regions):
        if region.chromosome not in chr_bins:
            chr_bins[region.chromosome] = [i, i]
        else:
            chr_bins[region.chromosome][1] = i

    n = len(regions)
    if not hasattr(hic_matrix, "mask"):
        hic_matrix = masked_matrix(hic_matrix)

    imputed = hic_matrix.copy()
    if per_chromosome:
        for c_start, c_end in chr_bins.values():
            # Correcting intrachromosomal contacts by mean contact count at each diagonal
            for i in range(c_end - c_start):
                ind = kth_diag_indices(c_end - c_start, -i)
                diag = imputed[c_start:c_end, c_start:c_end][ind]
                diag[diag.mask] = stat(diag)
                imputed[c_start:c_end, c_start:c_end][ind] = diag
            # Correcting interchromosomal contacts by mean of all contact counts
            # between each set of chromosomes
            for other_start, other_end in chr_bins.values():
                # Only correct upper triangle
                if other_start <= c_start:
                    continue
                inter = imputed[c_start:c_end, other_start:other_end]
                inter[inter.mask] = stat(inter)
                imputed[c_start:c_end, other_start:other_end] = inter
    else:
        for i in range(n):
            diag = imputed[kth_diag_indices(n, -i)]
            diag[diag.mask] = stat(diag)
            imputed[kth_diag_indices(n, -i)] = diag
    # Copying upper triangle to lower triangle
    imputed[np.tril_indices(n)] = imputed.T[np.tril_indices(n)]
    return imputed
Project: tadtool    Author: vaquerizaslab    | project source | file source
def _plot(self, region=None, cax=None):
        if region is None:
            raise ValueError("Cannot plot triangle plot for whole genome.")

        hm, sr = sub_matrix_regions(self.hic_matrix, self.regions, region)
        hm[np.tril_indices(hm.shape[0])] = np.nan
        # Remove part of matrix further away than max_dist
        if self.max_dist is not None:
            for i in range(hm.shape[0]):
                i_region = sr[i]
                for j in range(hm.shape[1]):
                    j_region = sr[j]

                    if j_region.start-i_region.end > self.max_dist:
                        hm[i, j] = np.nan

        hm_masked = np.ma.MaskedArray(hm, mask=np.isnan(hm))
        # prepare an array of the corner coordinates of the Hic-matrix
        # Distances have to be scaled by sqrt(2), because the diagonals of the bins
        # are sqrt(2)*len(bin_size)
        sqrt2 = math.sqrt(2)
        bin_coords = np.r_[[(x.start - 1) for x in sr], sr[-1].end]/sqrt2
        X, Y = np.meshgrid(bin_coords, bin_coords)
        # rotate the coordinate matrix by 45 degrees
        sin45 = math.sin(math.radians(45))
        X_, Y_ = X*sin45 + Y*sin45, X*sin45 - Y*sin45
        # shift x coords to correct start coordinate and center the diagonal directly on the
        # x-axis
        X_ -= X_[1, 0] - (sr[0].start - 1)
        Y_ -= .5*np.min(Y_) + .5*np.max(Y_)
        # create plot
        self.hicmesh = self.ax.pcolormesh(X_, Y_, hm_masked, cmap=self.colormap, norm=self.norm)

        # set limits and aspect ratio
        #self.ax.set_aspect(aspect="equal")
        ylim_max = 0.5*(region.end-region.start)
        if self.max_dist is not None and self.max_dist/2 < ylim_max:
            ylim_max = self.max_dist/2
        self.ax.set_ylim(0, ylim_max)
        # remove y ticks
        self.ax.set_yticks([])
        # hide background patch
        self.ax.patch.set_visible(False)
        if self.show_colorbar:
            self.add_colorbar(cax)
Project: smp_base    Author: x75    | project source | file source
def mixtureMV(self, mu, sig, ps):
        """LearningRules.mixtureMV

        Sample from a multivariate gaussian mixture with \mu = means, \Sigma = cov matrix
        """
        # print "%s.mixtureMV: Implement me" % (self.__class__.__name__)

        # print "mixtureMV", "mu", mu.shape, "sig", sig.shape, "ps", ps.shape

        mixcomps = self.mixcomps
        d = self.dim

        assert mixcomps == ps.shape[0]
        assert not np.isnan(mu).any()
        assert not np.isnan(sig).any()
        assert not np.isnan(ps).any()

        # check flat inputs
        if mu.shape[1] == 1:
            mu = mu.reshape((self.mixcomps, self.dim))
            # mu_ = mu.reshape((mixcomps, d))
        if sig.shape[1] == 1:
            # S_diag = sig[:(mixcomps * self.dim)]
            # S_triu = sig[(mixcomps * self.dim):]
            S_diag = sig[:(mixcomps * d)].reshape((mixcomps, d))
            S_triu = sig[(mixcomps * d):].reshape((mixcomps, -1))
            S = []
            for i in range(mixcomps):
                S_ = np.diag(S_diag[i])
                S_[np.triu_indices(d,  1)] = S_triu[i]
                S_[np.tril_indices(d, -1)] = S_triu[i] # check if thats correct
                S.append(S_)
            sig = np.array(S)

        # print "mixtureMV", "mu", mu.shape, "sig", sig.shape, "ps", ps.shape

        # d = mu.shape[0]
        compidx = self.mixture_sample_prior(ps)

        mu_ = mu[compidx]
        S_  = sig[compidx]

        # print "compidx", compidx, "mu_", mu_, "S_", S_

        y = np.random.multivariate_normal(mu_, S_)
        return y

    # mixture, mdn_loss, and softmax are taken from karpathy's MixtureDensityNets.py
Project: smp_base    Author: x75    | project source | file source
def test_mixtureMV(args):
    """test_mixtureMV

    Test mixtureMV() by sampling from dummy statistics mu, S, p
    """

    mixcomps = 3
    d = 2
    mu = np.random.uniform(-1, 1, size = (mixcomps, d))
    S = np.array([np.eye(d) * 0.01 for c in range(mixcomps)])
    for s in S:
        s += 0.05 * np.random.uniform(-1, 1, size = s.shape)
        s[np.tril_indices(d, -1)] = s[np.triu_indices(d, 1)]
    pi = np.ones((mixcomps, )) * 1.0/mixcomps

    lr = LearningRules(ndim_out = d, dim = d)
    lr.learnFORCEmdn_setup(mixcomps = mixcomps)

    fig = plt.figure()
    gs = gridspec.GridSpec(2, mixcomps)
    for g in gs:
        fig.add_subplot(g)

    ax = fig.axes[0]
    ax.plot(mu[:,0], mu[:,1], "ko", alpha = 0.5)
    # ax.set_xlim((-1.1, 1.1))
    # ax.set_ylim((-1.1, 1.1))


    ax = fig.axes[0] # (2 * mixcomps) + c]
    for i in range(1000):
        y = lr.mixtureMV(mu, S, pi)
        print "y", y
        ax.plot([y[0]], [y[1]], "ro", alpha = 0.2, markersize = 3.0)

    ax.set_aspect(1)

    for c in range(mixcomps):
        ax = fig.axes[mixcomps + c]
        ax.imshow(S[c], cmap = plt.get_cmap('PiYG'))

    fig.show()
    plt.show()

    print "mu", mu.shape, "S", S.shape
Project: bnpy    Author: bnpy    | project source | file source
def updateScoreMat_wholeELBO(ScoreMat, curModel, SS, doAllPairs=0):
    ''' Calculate upper-tri matrix of exact ELBO gap for each candidate pair

        Returns
        ---------
        Mraw : 2D array, size K x K. Upper-tri entries carry content.
            Mraw[j,k] gives the scalar ELBO gap for the potential merge of j,k
    '''
    K = SS.K
    if doAllPairs:
        AGap = curModel.allocModel.calcHardMergeGap_AllPairs(SS)
        OGap = curModel.obsModel.calcHardMergeGap_AllPairs(SS)
        ScoreMat = AGap + OGap
        ScoreMat[np.tril_indices(SS.K)] = -np.inf
        for k, uID in enumerate(SS.uIDs):
            CountTracker[uID] = SS.getCountVec()[k]
        nUpdated = SS.K * (SS.K - 1) // 2
    else:
        ScoreMat[np.tril_indices(SS.K)] = -np.inf
        # Rescore only specific pairs that are positive
        redoMask = ScoreMat > -1 * ELBO_GAP_ACCEPT_TOL
        for k, uID in enumerate(SS.uIDs):
            if CountTracker[uID] == 0:
                # Always precompute for brand-new comps
                redoMask[k, :] = 1
                redoMask[:, k] = 1
            else:
                absDiff = np.abs(SS.getCountVec()[k] - CountTracker[uID])
                percDiff = absDiff / (CountTracker[uID] + 1e-10)
                if percDiff > 0.25:
                    redoMask[k, :] = 1
                    redoMask[:, k] = 1
                    CountTracker[uID] = SS.getCountVec()[k]
        redoMask[np.tril_indices(SS.K)] = 0
        aList, bList = np.unravel_index(np.flatnonzero(redoMask), (SS.K, SS.K))

        if len(aList) > 0:
            mPairIDs = list(zip(aList, bList))
            AGap = curModel.allocModel.calcHardMergeGap_SpecificPairs(
                SS, mPairIDs)
            OGap = curModel.obsModel.calcHardMergeGap_SpecificPairs(
                SS, mPairIDs)
            ScoreMat[aList, bList] = AGap + OGap
        nUpdated = len(aList)
    MergeLogger.log('MERGE ScoreMat Updates: %d entries.' % (nUpdated),
                    level='debug')
    return ScoreMat
Project: posthocs    Author: maximtrp    | project source | file source
def posthoc_tukey_hsd(x, g, alpha = 0.05):

    '''Pairwise comparisons with TukeyHSD confidence intervals. This is a
    convenience function to make statsmodels pairwise_tukeyhsd() method more
    applicable for further use.

        Parameters
        ----------
        x : array_like or pandas Series object, 1d
            An array, any object exposing the array interface, containing
            the response variable. NaN values will cause an error. Please
            handle manually.

        g : array_like or pandas Series object, 1d
            An array, any object exposing the array interface, containing
            the groups' names. Can be string or integers.

        alpha : float, optional
            Significance level for the test. Default is 0.05.

        Returns
        -------
        Numpy ndarray where 0 is False (not significant), 1 is True (significant),
        and -1 is for diagonal elements.

        Examples
        --------

        >>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
        >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
        >>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
        array([[-1,  1,  0],
               [ 1, -1,  1],
               [ 0,  1, -1]])

    '''

    result = pairwise_tukeyhsd(x, g, alpha=alpha)
    groups = np.array(result.groupsunique, dtype=str)
    groups_len = len(groups)

    vs = np.arange(groups_len, dtype=int)[:,None].T.repeat(groups_len, axis=0)

    for a in result.summary()[1:]:
        a0 = str(a[0])
        a1 = str(a[1])
        a0i = np.where(groups == a0)[0][0]
        a1i = np.where(groups == a1)[0][0]
        vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0

    vs = np.triu(vs)
    np.fill_diagonal(vs, -1)
    tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[tri_lower] = vs.T[tri_lower]


    return vs