Python numpy.random module: multivariate_normal() code examples

We extracted the following 24 code examples from open-source Python projects to illustrate how to use numpy.random.multivariate_normal().
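Before the project examples, a minimal sketch of the call itself: numpy.random.multivariate_normal(mean, cov, size) draws size samples from N(mean, cov) and returns an array of shape (size, len(mean)). The values below are illustrative only.

import numpy as np

np.random.seed(0)
mean = [0.0, 0.0]
cov = [[1.0, 0.8],
       [0.8, 1.0]]  # must be symmetric positive semi-definite

samples = np.random.multivariate_normal(mean, cov, 5000)
print(samples.shape)                   # (5000, 2): one row per sample
print(np.cov(samples, rowvar=False))   # approaches cov as the sample grows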

Project: untwist | Author: IoSR-Surrey
def test_mlp():
    np.random.seed(0)
    # two well-separated 2-D Gaussian clusters
    # (here `normal` is presumably numpy.random.multivariate_normal,
    # imported under an alias in the test module)
    mean1 = [0, 0]
    mean2 = [30, 30]
    cov = [[2, 0], [0, 2]]
    x1 = normal(mean1, cov, 2000)
    x2 = normal(mean2, cov, 2000)
    y1 = np.ones((1000, 1))
    y2 = np.zeros((1000, 1))
    # train on the first 1000 points of each cluster
    ds = Dataset(2, float, 1, int)
    ds.add(x1[:1000, :], y1)
    ds.add(x2[:1000, :], y2)
    mlp = MLP(2, 1, [2, 2, 2, 2])
    sgd = SGD(mlp, rate_decay_th=0, iterations=1000)
    sgd.train(ds)
    # test on the held-out second half of each cluster
    y = sgd.predict(x1[1000:, :])
    print(np.sum(y > 0.5), y.shape[0])
    assert np.sum(y > 0.5) == y.shape[0]
    y = sgd.predict(x2[1000:, :])
    print(np.sum(y < 0.5), y.shape[0])
    assert np.sum(y < 0.5) == y.shape[0]
Project: lnn | Author: wgao9
def main():
    # assumes module-level imports: numpy.random as nr, log/pi/exp from math,
    # and the lnn estimator package
    r = 0.9999
    data = nr.multivariate_normal([0, 0], [[1, r], [r, 1]], 100)
    print("Entropy: ")
    print("Ground Truth = ", log(2*pi*exp(1)) + 0.5*log(1 - r**2))
    print("LNN: H(X) = ", lnn.entropy(data))
    print("KDE: H(X) = ", lnn.KDE_entropy(data))
    print("KL: H(X) = ", lnn.KL_entropy(data))
    print("LNN(1st order): H(X) = ", lnn.LNN_1_entropy(data), "\n")

    print("Mutual Information: ")
    print("Ground Truth = ", -0.5*log(1 - r**2))
    print("LNN: I(X;Y) = ", lnn.mi(data, split=1))
    print("KDE: I(X;Y) = ", lnn._3KDE_mi(data, split=1))
    print("3KL: I(X;Y) = ", lnn._3KL_mi(data, split=1))
    print("KSG: I(X;Y) = ", lnn._KSG_mi(data, split=1))
    print("LNN(1st order): I(X;Y) = ", lnn._3LNN_1_mi(data, split=1))
    print("LNN(1st order, KSG trick): I(X;Y) = ", lnn._3LNN_1_KSG_mi(data, split=1))
    print("LNN(2nd order, KSG trick): I(X;Y) = ", lnn._3LNN_2_KSG_mi(data, split=1))
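The ground-truth values above come from the closed forms for a bivariate Gaussian with unit variances and correlation r: the joint entropy is log(2*pi*e) + 0.5*log(1 - r^2) and the mutual information is -0.5*log(1 - r^2). A quick standalone check of those two constants:

import numpy as np

r = 0.9999
H_joint = np.log(2 * np.pi * np.e) + 0.5 * np.log(1 - r**2)  # joint entropy of (X, Y)
I_xy = -0.5 * np.log(1 - r**2)                               # mutual information I(X;Y)
print(H_joint, I_xy)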
Project: kerpy | Author: oxmlcs
def null_model(num_samples, dimension=1, rho=0):
        # X and Y each depend on Z plus independent Gaussian noise, so X and Y
        # are conditionally independent given Z (the null model)
        data_z = np.reshape(uniform(0,5,num_samples*dimension),(num_samples,dimension))
        coin_flip_x = np.random.choice([0,1],replace=True,size=num_samples)
        coin_flip_y = np.random.choice([0,1],replace=True,size=num_samples)
        mean_noise = [0,0]
        cov_noise = [[1,0],[0,1]]
        noise_x, noise_y = multivariate_normal(mean_noise, cov_noise, num_samples).T
        data_x = zeros(num_samples)
        data_x[coin_flip_x == 0] = 1.7*data_z[coin_flip_x == 0,0]
        data_x[coin_flip_x == 1] = -1.7*data_z[coin_flip_x == 1,0]
        data_x = data_x + noise_x
        data_y = zeros(num_samples)
        data_y[coin_flip_y == 0] = (data_z[coin_flip_y == 0,0]-2.7)**2
        data_y[coin_flip_y == 1] = -(data_z[coin_flip_y == 1,0]-2.7)**2+13
        data_y = data_y + noise_y
        data_x = np.reshape(data_x, (num_samples,1))
        data_y = np.reshape(data_y, (num_samples,1))
        return data_x, data_y, data_z
Project: kerpy | Author: oxmlcs
def alternative_model(num_samples, dimension=1, rho=0.15):
        # with probability rho the two coin flips coincide, inducing dependence
        # between X and Y even after conditioning on Z
        data_z = np.reshape(uniform(0,5,num_samples*dimension),(num_samples,dimension))
        rr = uniform(0,1, num_samples)
        idx_rr = np.where(rr < rho)
        coin_flip_x = np.random.choice([0,1],replace=True,size=num_samples)
        coin_flip_y = np.random.choice([0,1],replace=True,size=num_samples)
        coin_flip_y[idx_rr] = coin_flip_x[idx_rr]
        mean_noise = [0,0]
        cov_noise = [[1,0],[0,1]]
        noise_x, noise_y = multivariate_normal(mean_noise, cov_noise, num_samples).T
        data_x = zeros(num_samples)
        data_x[coin_flip_x == 0] = 1.7*data_z[coin_flip_x == 0,0] 
        data_x[coin_flip_x == 1] = -1.7*data_z[coin_flip_x == 1,0]
        data_x = data_x + noise_x
        data_y = zeros(num_samples)
        data_y[coin_flip_y == 0] = (data_z[coin_flip_y == 0,0]-2.7)**2
        data_y[coin_flip_y == 1] = -(data_z[coin_flip_y == 1,0]-2.7)**2+13
        data_y = data_y + noise_y
        data_x = np.reshape(data_x, (num_samples,1))
        data_y = np.reshape(data_y, (num_samples,1))
        return data_x, data_y, data_z
Project: kerpy | Author: oxmlcs
def LargeScale(num_samples, dimension=4):
        ''' dimension takes large even numbers, e.g. 50, 100 '''
        Xmean = zeros(dimension)
        Xcov = identity(dimension)
        data_x = multivariate_normal(Xmean, Xcov, num_samples)
        dd = dimension // 2  # integer division, so zeros(dd+1) gets an int
        Zmean = zeros(dd + 1)
        Zcov = identity(dd + 1)
        Z = multivariate_normal(Zmean, Zcov, num_samples)
        # pair consecutive coordinates of X, take the sign of each product,
        # and weight by |Z|; the last column of Z enters additively
        first_term = sqrt(2./dimension)*sum(sign(data_x[:, arange(0, dimension, 2)] *
                                                 data_x[:, arange(1, dimension, 2)]) *
                                            abs(Z[:, range(dd)]),
                                            axis=1, keepdims=True)
        second_term = Z[:, [dd]]  # take the last dimension of Z
        data_y = first_term + second_term
        return data_x, data_y
Project: plda | Author: RaviSoji
def gen_U(self, n_dims, n, V):
        """ u ~ N(v, I). See p. 533.
        """
        cov = np.eye(n_dims)

        U = []
        for v in V:
            μ = np.zeros(n_dims)
            U_for_class_v = m_normal(μ, cov, n)
            U_for_class_v -= U_for_class_v.mean(axis=0)
            U_for_class_v += v  # Center at v to control test results.
            U.append(U_for_class_v)

        # To control the test result, each set of u's sums to its respective v.
        for x in range(len(U)):
            are_same = np.allclose(V[x], U[x].mean(axis=0))
            assert are_same

        U = np.vstack(U)

        return U
Project: scikit-discovery | Author: MITHaystack
def gaussian_walk(pos, grid, step_size=None):
    '''
    A Gaussian random walk function

    @param pos: tuple of input point
    @param grid: bounds for walk; also sets the per-axis step variance
    @param step_size: maximal step size (currently unused)

    @return position tuple
    '''

    # draw a single 2-D step with axis variances grid[0] and grid[1]
    step = multivariate_normal(mean=[0, 0], cov=[[grid[0], 0], [0, grid[1]]], size=1)[0]
    newpos = [pos[0] + step[0], pos[1] + step[1]]

    newpos = keep_in_bound(newpos, grid)
    return newpos
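A minimal usage sketch for the walk above; keep_in_bound here is a hypothetical stand-in for the project's helper of the same name, and the grid and start position are illustrative:

import numpy as np
from numpy.random import multivariate_normal

def keep_in_bound(pos, grid):
    # hypothetical clamp: keep each coordinate inside [0, grid[i]]
    return [min(max(pos[0], 0.0), grid[0]), min(max(pos[1], 0.0), grid[1])]

np.random.seed(0)
pos = (5.0, 5.0)
for _ in range(100):
    pos = gaussian_walk(pos, grid=(10, 10))
print(pos)  # final position after 100 clamped Gaussian steps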
Project: kerpy | Author: oxmlcs
def DAG_simulation_version1(num_samples):
        dimension = 1
        rho = 0
        data_z = np.reshape(uniform(0,5,num_samples*dimension),(num_samples,dimension))
        rr = uniform(0,1, num_samples)
        idx_rr = np.where(rr < rho)
        coin_flip_x = np.random.choice([0,1],replace=True,size=num_samples)
        coin_flip_y = np.random.choice([0,1],replace=True,size=num_samples)
        coin_flip_y[idx_rr] = coin_flip_x[idx_rr]
        mean_noise = [0,0]
        cov_noise = [[1,0],[0,1]]
        noise_x, noise_y = multivariate_normal(mean_noise, cov_noise, num_samples).T
        data_x = zeros(num_samples)
        data_x[coin_flip_x == 0] = 1.7*data_z[coin_flip_x == 0,0] 
        data_x[coin_flip_x == 1] = -1.7*data_z[coin_flip_x == 1,0]
        data_x = data_x + noise_x
        data_y = zeros(num_samples)
        data_y[coin_flip_y == 0] = (data_z[coin_flip_y == 0,0]-2.7)**2
        data_y[coin_flip_y == 1] = -(data_z[coin_flip_y == 1,0]-2.7)**2+13
        data_y = data_y + noise_y
        data_x = np.reshape(data_x, (num_samples,1))
        data_y = np.reshape(data_y, (num_samples,1))
        coin_x = np.reshape(coin_flip_x, (num_samples,1))
        coin_y = np.reshape(coin_flip_y, (num_samples,1))

        noise_A, noise_B = multivariate_normal(mean_noise, cov_noise, num_samples).T
        noise_A = np.reshape(noise_A, (num_samples,1))
        noise_B = np.reshape(noise_B, (num_samples,1))

        data_A = (data_y-5)**2/float(3) + 5 + noise_A
        #data_A = (data_y-5)**2/float(11)+ 5 +noise_A
        #data_B = 5*np.tanh(data_y) + noise_B # tanh version 
        #data_B = 5*np.sin(data_y) + noise_B # sine version
        data_B = 5.5*np.tanh(data_y) + noise_B 

        # columns: data_x, data_y, data_z, data_A, data_B, coin_x, coin_y
        data_matrix = np.concatenate((data_x,data_y,data_z,data_A,data_B,coin_x,coin_y),axis=1)
        return data_matrix
Project: kerpy | Author: oxmlcs
def VaryDimension(num_samples, dimension = 5):
        Xmean = zeros(dimension)
        Xcov = identity(dimension)
        data_x = multivariate_normal(Xmean, Xcov, num_samples)
        data_z = transpose([normal(0,1,num_samples)])
        data_y = 20*sin(4*pi*(data_x[:,[0]]**2 + data_x[:,[1]]**2)) + data_z
        return data_x,data_y
Project: plda | Author: RaviSoji
def gen_data(cls, dims, n, K, shared_cov, dist_bw_means):
        X = np.vstack([m_normal(np.ones(dims) * dist_bw_means * x,
                                shared_cov, n) for x in range(K)])
        Y = np.hstack([['gaussian_{}'.format(k)] * n for k in range(K)])
        fnames = np.asarray(['gaussian_{}_x_{}.jpg'.format(k, x)
                             for k in range(K) for x in range(n)])

        # Do not delete these assertions.
        assert len(X.shape) == 2
        assert X.shape == (n * K, dims)
        assert Y.shape[0] == X.shape[0] == fnames.shape[0]
        assert len(Y.shape) == 1
        assert len(fnames.shape) == 1

        return X, Y, fnames
Project: plda | Author: RaviSoji
def gen_V(self, Ψ, n_classes, n_dims):
        """ v ~ N(0, Ψ): Consult Equation (2) on p. 533.

        DESCRIPTION: Samples whitened class centers from a multivariate
                      Gaussian distribution centered at 0, with covariance Ψ.
                      For testing purposes, we ensure that V.sum(axis=0) = 0.

        PARAMETERS
         n_classes      (int): Number of classes.
         n_dims         (int): Dimensionality of the data.
         Ψ          (ndarray): Covariance between whitened class centers.
                                [n_dims x n_dims]
        RETURNS
         V          (ndarray): Whitened class centers. [n_classes x n_dims]
        """
        assert Ψ.shape[0] == Ψ.shape[1]

        μ = np.zeros(n_dims)  # [1 x n_dims]
        np.random.seed(0)
        V = m_normal(μ, Ψ, n_classes)
        V = V - V.mean(axis=0)  # Center means at origin to control result.

        assert np.allclose(V.sum(axis=0), 0)

        return V
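The same sampling step works standalone; a minimal sketch (names illustrative) of drawing class centers from N(0, Ψ) and centering them at the origin:

import numpy as np

np.random.seed(0)
n_classes, n_dims = 4, 3
Ψ = np.eye(n_dims)  # any symmetric positive semi-definite covariance
V = np.random.multivariate_normal(np.zeros(n_dims), Ψ, n_classes)
V -= V.mean(axis=0)  # force the centers to sum to zero, as in gen_V
print(np.allclose(V.sum(axis=0), 0))  # True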
Project: plda | Author: RaviSoji
def gen_data(self, dims, n, K, shared_cov, dist_bw_means):
        X = np.vstack([m_normal(np.ones(dims) * dist_bw_means * x,
                                shared_cov, n) for x in range(K)])
        Y = np.hstack([['gaussian_{}'.format(k)] * n for k in range(K)])
        fnames = np.asarray(['gaussian_{}_x_{}.jpg'.format(k, x)
                            for k in range(K) for x in range(n)])

        # Do not delete these assertions.
        assert len(X.shape) == 2
        assert X.shape == (n * K, dims)
        assert Y.shape[0] == X.shape[0] == fnames.shape[0]
        assert len(Y.shape) == 1
        assert len(fnames.shape) == 1

        return X, Y, fnames
Project: plda | Author: RaviSoji
def gen_training_set(n_gaussians, sample_size, n_dims):
    cov = np.random.randint(-10, 10, (n_dims, n_dims))
    # cov @ cov.T is positive semi-definite; the random positive diagonal
    # makes it (almost surely) positive definite
    cov = np.matmul(cov, cov.T) + np.eye(n_dims) * np.random.rand(n_dims)

    pts = np.vstack([m_normal(np.ones(n_dims) *
                              np.random.randint(-100, 100, 1),
                              cov, sample_size)
                     for x in range(n_gaussians)])
    lbls = np.hstack([['gaussian_{}'.format(x)] * sample_size
                      for x in range(n_gaussians)])

    return pts, lbls
Project: product-taz | Author: TheAnomalieZ
def sample_from_prior_given_hypers(self, pred, n_samples=1, joint=True):
        N_pred = pred.shape[0]
        if joint:
            mean = self.mean.value
            cov  = self.noiseless_kernel.cov(pred) # Gaussian likelihood happens here
            return npr.multivariate_normal(mean*np.ones(N_pred), cov, size=n_samples).T.squeeze()
        else:
            mean = self.mean.value
            var  = self.noiseless_kernel.diag_cov(pred)
            return np.squeeze(mean + npr.randn(N_pred, n_samples) * np.sqrt(var)[:,None])

    # Sample from p(y)
    # This is achieved by first sampling theta from its hyperprior p(theta), and then
    # sampling y from p(y | theta)
Project: product-taz | Author: TheAnomalieZ
def sample_from_posterior_given_hypers_and_data(self, pred, n_samples=1, joint=True):
        if joint:
            predicted_mean, cov = self.predict(pred, full_cov=True) # This part depends on the data
            return npr.multivariate_normal(predicted_mean, cov, size=n_samples).T.squeeze()
        else:
            predicted_mean, var = self.predict(pred, full_cov=False) # This part depends on the data
            return np.squeeze(predicted_mean[:,None] + npr.randn(pred.shape[0], n_samples) * np.sqrt(var)[:,None])

    # Sample from p(y | data), integrating out the hyperparameters (theta)
    # This is achieved by first sampling theta from p(theta | data), and then
    # sampling y from p(y | theta, data)
Project: product-taz | Author: TheAnomalieZ
def sample(self, n_samples):
        # Sample from standard half-cauchy distribution
        lamda = np.abs(npr.standard_cauchy(size=n_samples))

        # I think scale is the thing called Tau^2 in the paper.
        return npr.randn() * lamda * self.scale  # note: a single randn() draw is shared across all n_samples
        # return npr.multivariate_normal()
Project: product-taz | Author: TheAnomalieZ
def logprob(self, x):
        return sps.multivariate_normal.logpdf(x, mean=self.mu, cov=self.cov)
Project: product-taz | Author: TheAnomalieZ
def sample(self, n_samples):
        return npr.multivariate_normal(self.mu, self.cov, size=n_samples).T.squeeze()
Project: product-taz | Author: TheAnomalieZ
def sample_from_posterior_given_hypers_and_data(self, pred, n_samples=1, joint=True):
        if joint:
            predicted_mean, cov = self.predict(pred, full_cov=True) # This part depends on the data
            return npr.multivariate_normal(predicted_mean, cov, size=n_samples).T.squeeze()
        else:
            predicted_mean, var = self.predict(pred, full_cov=False) # This part depends on the data
            return np.squeeze(predicted_mean[:,None] + npr.randn(pred.shape[0], n_samples) * np.sqrt(var)[:,None])

    # Sample from p(y | data), integrating out the hyperparameters (theta)
    # This is achieved by first sampling theta from p(theta | data), and then
    # sampling y from p(y | theta, data)
Project: product-taz | Author: TheAnomalieZ
def sample(self, n_samples):
        # Sample from standard half-cauchy distribution
        lamda = np.abs(npr.standard_cauchy(size=n_samples))

        # I think scale is the thing called Tau^2 in the paper.
        return npr.randn() * lamda * self.scale  # note: a single randn() draw is shared across all n_samples
        # return npr.multivariate_normal()
Project: product-taz | Author: TheAnomalieZ
def logprob(self, x):
        return sps.multivariate_normal.logpdf(x, mean=self.mu, cov=self.cov)
Project: product-taz | Author: TheAnomalieZ
def sample(self, n_samples):
        return npr.multivariate_normal(self.mu, self.cov, size=n_samples).T.squeeze()
Project: adversarial-variational-bayes | Author: gdikov
def estimation(self, y):
        """ Estimate Shannon entropy.

        Parameters
        ----------
        y : (number of samples, dimension)-ndarray
             One row of y corresponds to one sample.

        Returns
        -------
        h : float
            Estimated Shannon entropy.

        References
        ----------
        Qing Wang, Sanjeev R. Kulkarni, and Sergio Verdu. Universal
        estimation of information measures for analog sources. Foundations
        and Trends in Communications and Information Theory, 5:265-353,
        2009.

        Examples
        --------
        h = co.estimation(y)

        """

        num_of_samples, dim = y.shape  # number of samples, dimension

        # estimate the mean and the covariance of y:
        m = mean(y, axis=0)
        c = cov(y, rowvar=False)  # 'rowvar=False': 1 row = 1 observation

        # entropy of N(m,c):
        if dim == 1: 
            det_c = c  # det(): 'expected square matrix' exception
            # multivariate_normal(): 'cov must be 2 dimensional and square'
            # exception:
            c = array([[c]])

        else:
            det_c = det(c)

        h_normal = 1/2 * log((2*pi*exp(1))**dim * det_c)

        # generate samples from N(m,c):
        y_normal = multivariate_normal(m, c, num_of_samples)

        h = h_normal - self.kl_co.estimation(y, y_normal)

        return h
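A quick sanity check of the analytic term above: the entropy of N(m, C) is 0.5*log((2*pi*e)^dim * det(C)), and for a diagonal C it must equal the sum of the one-dimensional Gaussian entropies. The values below are illustrative:

import numpy as np
from numpy.linalg import det

C = np.array([[2.0, 0.0],
              [0.0, 3.0]])
d = C.shape[0]
h = 0.5 * np.log((2 * np.pi * np.e) ** d * det(C))  # the h_normal term above
h_sum = sum(0.5 * np.log(2 * np.pi * np.e * v) for v in np.diag(C))
print(np.isclose(h, h_sum))  # True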
Project: mcmc_sv | Author: jiacheng0409
def __init__(self, response_name, covariates_names, train_df, Seed=rand.randint(1)):
        # note: rand.randint(1) always returns 0, so the default seed is deterministic
        self.response_name = response_name
        self.covariates_names = covariates_names
        rand.seed(Seed)
        n_obs = train_df.shape[0]
        def beta_prior():
            dimension = len(self.covariates_names)
            response = train_df.as_matrix([self.response_name]).reshape((n_obs,1))
            covariates = train_df.as_matrix(self.covariates_names).reshape((n_obs,dimension))

            linear_model = LinearRegression(fit_intercept=False)
            fitted = linear_model.fit(X=covariates, y=response)
            mean_vec = fitted.coef_[0]

            cov_mat = make_spd_matrix(n_dim=dimension) # the covariance matrix must be SPD

            Beta = dict()
            Beta['Value'] = rand.multivariate_normal(mean=mean_vec, cov=cov_mat)
            Beta['Mean'] = mean_vec
            Beta['Cov'] = cov_mat
            return Beta

        self.Beta = beta_prior()

        garch = arch_model(train_df['vwretd'], p=1, q=1)
        fitted = garch.fit(update_freq=0,show_warning=True)
        print('[INFO] Finished fitting a GARCH model as initialization for latent volatilities:\n{}'.format(fitted.summary()))

        alpha_mean_vec = np.array([fitted.params['omega'],fitted.params['beta[1]']])
        h_mean_vec = fitted.conditional_volatility
        self.H =  h_mean_vec

        def AlphaPrior():
            mean_vec = alpha_mean_vec
            cov_mat = make_spd_matrix(n_dim=2)  # the covariance matrix must be SPD
            Alpha = dict()
            Alpha['Value'] = rand.multivariate_normal(mean=mean_vec, cov=cov_mat)
            Alpha['Mean'] = mean_vec
            Alpha['Cov'] = cov_mat
            return Alpha
        Alpha = AlphaPrior()
        # the abs(Alpha_2) < 1 constraint makes sure that our AR(1) for volatility is stationary
        while np.abs(Alpha['Value'][1]) >= 1:  Alpha = AlphaPrior()
        self.Alpha = Alpha

        def SigmaPrior():
            Lambda = 0.2
            m = 5
            DegreeOfFreedom = n_obs + m - 1
            sigma_sq_inv = rand.chisquare(DegreeOfFreedom)
            sigma_sq = dict()
            sigma_sq['Value'] = float(m * Lambda) / sigma_sq_inv
            sigma_sq['Lambda'] = Lambda
            sigma_sq['m'] = m
            return sigma_sq
        Sigma_Sq = SigmaPrior()
        self.Sigma_Sq = Sigma_Sq
        print('{0}\n[INFO] Finished initialization of parameters.'.format('=' * 20 + NOW() + '=' * 20))