Python numpy module: matmul() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.matmul().
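A quick minimal sketch of the call itself: for 2-D arrays numpy.matmul is the ordinary matrix product (equivalent to the @ operator), 1-D operands are treated as vectors, and stacks of matrices broadcast over the leading dimensions.

import numpy as np

A = np.array([[1., 2.], [3., 4.]])   # 2 x 2
B = np.array([[5., 6.], [7., 8.]])   # 2 x 2
print(np.matmul(A, B))               # same result as A @ B

v = np.array([1., 1.])
print(np.matmul(A, v))               # matrix-vector product, shape (2,)

stack = np.ones((3, 2, 2))
print(np.matmul(stack, B).shape)     # (3, 2, 2): B is broadcast over the stack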

Project: Stein-Variational-Gradient-Descent    Author: DartML
def svgd_kernel(self, h = -1):
        sq_dist = pdist(self.theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, using median trick
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(self.theta.shape[0]+1))

        # compute the rbf kernel

        Kxy = np.exp( -pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, self.theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(self.theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy)
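The per-column loop above simply adds theta[:, i] * sum_j K_ij back into each column before dividing by h**2; an equivalent vectorized form, as a sketch assuming theta is an (n, d) array and Kxy, h are the kernel matrix and bandwidth computed above:

import numpy as np

def svgd_kernel_grad_vectorized(theta, Kxy, h):
    # same quantity as the loop: dxkxy[:, i] += theta[:, i] * Kxy.sum(axis=1)
    sumkxy = np.sum(Kxy, axis=1)
    return (-np.matmul(Kxy, theta) + theta * sumkxy[:, np.newaxis]) / (h ** 2)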
Project: Lattice-Based-Signatures    Author: krishnacharya
def KeyGen(**kwargs):
    '''
    Appendix B of BLISS paper
    m_bar = m + n

    o/p:    
    A: Public Key n x m' numpy array
    S: Secret Key m' x n numpy array
    '''
    q, n, m, alpha = kwargs['q'], kwargs['n'], kwargs['m'], kwargs['alpha']
    Aq_bar = util.crypt_secure_matrix(-(q-1)/2, (q-1)/2, n, m)
    S_bar = util.crypt_secure_matrix(-(2)**alpha, (2)**alpha, m, n) # alpha is small enough, we need not reduce (modq)
    S = np.vstack((S_bar, np.eye(n, dtype = int))) # dimension is m_bar x n, Elements are in Z mod(2q)
    A = np.hstack((2*Aq_bar, q * np.eye(n, dtype = int) - 2*np.matmul(Aq_bar,S_bar))) # dimension is n x m_bar , Elements are in Z mod(2q)
    #return util.matrix_to_Zq(A, 2*q), S, Aq_bar, S_bar
    return util.matrix_to_Zq(A, 2*q), S
Project: Lattice-Based-Signatures    Author: krishnacharya
def test():
    # Classical SIS parameters
    n, m, alpha, q = 128, 872, 1, 114356107
    kappa = 20

    #Discrete Gaussian Parameters
    sd = 300
    eta = 1.2

    A, S = KeyGen(q = q,n = n,m = m,alpha = alpha)
    #print np.array(np.matmul(A,S) - q*np.eye(n),dtype=float)/(2*q) #to test AS = q mod(2q)
    z, c = Sign(msg = "Hello Bob",A = A,S = S,m = m,n = n,sd = sd,q = q,M = 3.0,kappa = kappa)
    print(z)
    print(c)
    print(Verify(msg = "Hello Bob", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
    print(Verify(msg = "Hello Robert", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
    print(Verify(msg = "Hello Roberto", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
    print(Verify(msg = "Hola Roberto", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa = kappa))
Project: Lattice-Based-Signatures    Author: krishnacharya
def Verify(**kwargs):
    '''
        Verification for the signature
        i/p:
        msg: the string sent by the sender
        (z,c): vectors in Zq, the signature
        A  : numpy array, Verification Key dimension nxm
        T : the matrix AS mod q, used in the verification of the signature
    '''
    msg, z, c, A, T, sd, eta, m, k, q = kwargs['msg'], kwargs['z'], kwargs['c'], kwargs['A'], kwargs['T'], kwargs['sd'], kwargs['eta'], kwargs['m'], kwargs['k'], kwargs['q']
    norm_bound = eta * sd * np.sqrt(m)
    # checks for norm of z being small and that H(Az-Tc mod q,msg) hashes to c
    vec = util.vector_to_Zq(np.array(np.matmul(A,z) - np.matmul(T,c)), q)
    hashedList = util.hash_to_baseb(vec, msg, 3, k)
    print(hashedList, c)
    if np.sqrt(z.dot(z)) <= norm_bound and np.array_equal(c, hashedList):
        return True
    else:
        return False
Project: Lattice-Based-Signatures    Author: krishnacharya
def KeyGen(n, m, k, d, q):
    '''
        input:
        q : polynomial size prime number
        n, m, k : dimensions specifiers
        d : SIS parameter, hardest instances are where d ~ q^(n/m)

        output:
        Signing Key S :  Matrix of dimension mxk with coefficients in [-d, d]
        Verification Key A : Matrix of dimension nxm with coefficients from [-(q-1)/2,(q-1)/2]
        T : the matrix AS, used in the verification of the signature

    '''
    S = crypt_secure_matrix(d, m, k)
    A = crypt_secure_matrix((q-1)/2, n, m)
    T = np.matmul(A, S)
    return S, A, T
Project: Neural_Artistic_Style    Author: everfor
def transfer_color(content, style):
    import scipy.linalg as sl
    # Mean and covariance of content
    content_mean = np.mean(content, axis = (0, 1))
    content_diff = content - content_mean
    content_diff = np.reshape(content_diff, (-1, content_diff.shape[2]))
    content_covariance = np.matmul(content_diff.T, content_diff) / (content_diff.shape[0])

    # Mean and covariance of style
    style_mean = np.mean(style, axis = (0, 1))
    style_diff = style - style_mean
    style_diff = np.reshape(style_diff, (-1, style_diff.shape[2]))
    style_covariance = np.matmul(style_diff.T, style_diff) / (style_diff.shape[0])

    # Calculate A and b
    A = np.matmul(sl.sqrtm(content_covariance), sl.inv(sl.sqrtm(style_covariance)))
    b = content_mean - np.matmul(A, style_mean)

    # Construct new style
    new_style = np.reshape(style, (-1, style.shape[2])).T
    new_style = np.matmul(A, new_style).T
    new_style = np.reshape(new_style, style.shape)
    new_style = new_style + b

    return new_style
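A hypothetical usage sketch of transfer_color as defined above, with random H x W x 3 float images standing in for real content and style arrays (scipy.linalg supplies sqrtm and inv):

import numpy as np

rng = np.random.default_rng(0)
content = rng.random((64, 64, 3)) * 255.0
style = rng.random((64, 64, 3)) * 255.0
recolored = transfer_color(content, style)   # style image recolored toward the content statistics
print(recolored.shape)                       # (64, 64, 3)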
Project: Stein-Variational-Gradient-Descent    Author: DartML
def svgd_kernel(self, theta, h = -1):
        sq_dist = pdist(theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, using median trick
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(theta.shape[0]+1))

        # compute the rbf kernel
        Kxy = np.exp( -pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy)
Project: pyrsss    Author: butala
def run_random_sim(sim, L):
    """
    Run *L* simulations of the state space model specified by *sim*
    (see :func:`setup_random_sim`). Each simulation is added to *sim*,
    indexed by an integer identifier.
    """
    sim['L'] = L
    for l in range(L):
        sim[l] = defaultdict(list)
        x_i = sim['mu'] + NP.matmul(sim['PI_sqrt'], NP.random.randn(sim['N']))
        for i in range(sim['I']):
            sim[l]['x'].append(x_i)
            # measurement
            v_i = NP.matmul(sim['R_sqrt'][i], NP.random.randn(sim['M']))
            sim[l]['y'].append(NP.matmul(sim['H'][i], sim[l]['x'][i]) + v_i)
            # time update
            u_i = NP.matmul(sim['Q_sqrt'][i], NP.random.randn(sim['N']))
            x_i = NP.matmul(sim['F'][i], x_i) + u_i
    return sim
Project: pyrsss    Author: butala
def sqrt_kf_sim(sim):
    """
    Process each simulation trial generated with
    :func:`setup_random_test` with a Kalman filter and return the
    posterior state estimates and error covariances.
    """
    post = defaultdict(dict)
    for l in range(sim['L']):
        x_hat_l, P_sqrt_l = sqrt_kalman_filter(sim[l]['y'],
                                               sim['H'],
                                               sim['R_sqrt'],
                                               sim['F'],
                                               sim['Q_sqrt'],
                                               sim['mu'],
                                               sim['PI_sqrt'])
        post[l]['x_hat'] = x_hat_l
        if l == 0:
            post['P'] = [NP.matmul(x, x.T) for x in P_sqrt_l]
        post[l]['error'] = []
        for x_i, x_hat_i in zip(sim[l]['x'], post[l]['x_hat']):
            post[l]['error'].append(x_hat_i - x_i)
    return post
Project: pyrsss    Author: butala
def sqrt_kf_tu(x_hat_posterior,
               P_sqrt_posterior,
               F_i,
               Q_sqrt_i,
               z_i=None):
    """
    Square root Kalman filter time update. Given the following:
    - *x_hat_posterior*: posterior state estimate (N)
    - *P_sqrt_posterior*: posterior error covariance square root (NxN)
    - *F_i*: time update operator (NxN)
    - *Q_sqrt_i*: time update noise covariance square root (NxN)
    - *z_i*: (optional) systematic time update input (N)

    Return the tuple containing the one time step prediction of the
    state and the square root of the error covariance.
    """
    N, _ = F_i.shape
    x_hat_prior = NP.matmul(F_i, x_hat_posterior)
    if z_i is not None:
        x_hat_prior += z_i
    A_T = NP.block([NP.matmul(F_i, P_sqrt_posterior), Q_sqrt_i])
    R_T = NP.linalg.qr(A_T.T, mode='r')
    P_sqrt_prior = R_T.T[:, :N]
    return x_hat_prior, P_sqrt_prior
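A minimal numerical check, assuming sqrt_kf_tu above is importable and using random N = 3 square-root factors: the QR-based square root should reproduce the standard covariance propagation F P F^T + Q.

import numpy as NP

N = 3
rng = NP.random.default_rng(1)
F = rng.standard_normal((N, N))
M1 = rng.standard_normal((N, N))
M2 = rng.standard_normal((N, N))
P_sqrt = NP.linalg.cholesky(M1 @ M1.T + NP.eye(N))   # posterior covariance square root
Q_sqrt = NP.linalg.cholesky(M2 @ M2.T + NP.eye(N))   # process noise covariance square root
x_hat = rng.standard_normal(N)

x_prior, P_sqrt_prior = sqrt_kf_tu(x_hat, P_sqrt, F, Q_sqrt)
P_prior = NP.matmul(P_sqrt_prior, P_sqrt_prior.T)
assert NP.allclose(P_prior, F @ P_sqrt @ P_sqrt.T @ F.T + Q_sqrt @ Q_sqrt.T)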
Project: Tweaker-3    Author: ChristophSchranz
def rotate_ascii_stl(self, rotation_matrix, content, filename):
        """Rotate the mesh array and save as ASCII STL."""
        mesh = np.array(content, dtype=np.float64)

        # prefix area vector, if not already done (e.g. in STL format)
        if len(mesh[0]) == 3:
            row_number = int(len(content)/3)
            mesh = mesh.reshape(row_number, 3, 3)

        # upgrade numpy with: "pip install numpy --upgrade"
        rotated_content = np.matmul(mesh, rotation_matrix)

        v0 = rotated_content[:, 0, :]
        v1 = rotated_content[:, 1, :]
        v2 = rotated_content[:, 2, :]
        normals = np.cross(np.subtract(v1, v0), np.subtract(v2, v0)) \
            .reshape(int(len(rotated_content)), 1, 3)
        rotated_content = np.hstack((normals, rotated_content))

        tweaked = list("solid %s" % filename)
        tweaked += list(map(self.write_facett, list(rotated_content)))
        tweaked.append("\nendsolid %s\n" % filename)
        tweaked = "".join(tweaked)

        return tweaked
Project: radar    Author: amoose136
def test_exceptions(self):
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1)),               # scalar vector
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)
Project: radar    Author: amoose136
def test_shapes(self):
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))

        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())
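The same shape rules can be checked directly with numpy.matmul; a short sketch mirroring the cases above:

import numpy as np

a = np.ones((2, 1, 1))
b = np.ones((1, 1))
print(np.matmul(a, b).shape)              # (2, 1, 1): the matrix stack broadcasts

x = np.ones(2)
print(np.array(np.matmul(x, x)).shape)    # (): vector-vector gives a scalar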
Project: radar    Author: amoose136
def test_numpy_ufunc_override(self):
        # 2016-01-29: NUMPY_UFUNC_DISABLED
        return

        class A(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A([1, 2])
        b = B([1, 2])
        c = np.ones(2)
        assert_equal(self.matmul(a, b), "A")
        assert_equal(self.matmul(b, a), "A")
        assert_raises(TypeError, self.matmul, b, c)
Project: ababe    Author: unkcpz
def get_symmetry_permutation(self):
        """
        This is an object method to get the permutation group operators.
        Represented as a table.
        """
        sym_perm = []
        numbers = [i for i in range(self.num_count)]
        sym_mat = spglib.get_symmetry(self._spg_cell, symprec=self.symprec)
        ops = [(r, t) for r, t in zip(sym_mat['rotations'],
                                      sym_mat['translations'])]
        for r, t in ops:
            pos_new = np.transpose(np.matmul(r, self._positions.T)) + t
            perm = self._get_new_id_seq(pos_new, numbers)
            sym_perm.append(perm)

        return sym_perm
Project: ababe    Author: unkcpz
def supercell(self, scale_mat):
        """
        Get the supercell of the original gcell.
        scale_mat is similar to the H matrix in a superlattice generator.
        """
        # return self.__class__(...)
        sarr_lat = np.matmul(scale_mat, self.lattice)
        # coor_conv_pos = np.matmul(self.positions, self.lattice)
        # o_conv_pos = np.matmul(coor_conv_pos, np.linalg.inv(scale_mat))
        o_conv_pos = np.matmul(self.positions, np.linalg.inv(scale_mat))
        o_pos = self.get_frac_from_mat(scale_mat)

        l_of_positions = [i for i in map(lambda x: x+o_pos, list(o_conv_pos))]
        pos = np.concatenate(l_of_positions, axis=0)

        n = scale_mat.diagonal().prod()
        numbers = np.repeat(self.numbers, n)

        return self.__class__(sarr_lat, pos, numbers)
Project: speech_feature_extractor    Author: ZhihaoDU
def ams_extractor(x, sr, win_len, shift_len, order):
    from scipy.signal import hilbert
    envelope = np.abs(hilbert(x))
    for i in range(order-1):
        envelope = np.abs(hilbert(envelope))
    envelope = envelope * 1./3.
    frames = (len(envelope) - win_len) // shift_len
    hanning_window = np.hanning(win_len)
    ams_feature = np.zeros(shape=(15, frames))
    wts = cal_triangle_window(0, sr//2, win_len, 15, 15.6, 400)
    for i in range(frames):
        one_frame = x[i*shift_len:i*shift_len+win_len]
        one_frame = one_frame * hanning_window
        frame_fft = np.abs(np.fft.fft(one_frame, win_len))
        ams_feature[:,i] = np.matmul(wts, frame_fft)
    return ams_feature
Project: speech_feature_extractor    Author: ZhihaoDU
def ams_extractor(x, sr, win_len, shift_len, order=1, decimate_coef=1./4.):
    from scipy.signal import hilbert
    envelope = np.abs(hilbert(x))
    for i in range(order-1):
        envelope = np.abs(hilbert(envelope))
    envelope = envelope * decimate_coef
    frames = (len(envelope) - win_len) // shift_len
    hanning_window = np.hanning(win_len)
    ams_feature = np.zeros(shape=(15, frames))
    wts = cal_triangle_window(0, sr//2, win_len, 15, 15.6, 400)
    for i in range(frames):
        one_frame = x[i*shift_len:i*shift_len+win_len]
        one_frame = one_frame * hanning_window
        frame_fft = np.abs(np.fft.fft(one_frame, win_len))
        ams_feature[:,i] = np.matmul(wts, frame_fft)
    return ams_feature
Project: speech_feature_extractor    Author: ZhihaoDU
def unknown_feature_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams
Project: speech_feature_extractor    Author: ZhihaoDU
def ams_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams
Project: bifrost    Author: ledatelescope
def run_test_matmul_aa_ci8_shape(self, shape, transpose=False):
        # **TODO: This currently never triggers the transpose path in the backend
        shape_complex = shape[:-1] + (shape[-1] * 2,)
        # Note: The xGPU-like correlation kernel does not support input values of -128 (only [-127:127])
        a8 = ((np.random.random(size=shape_complex) * 2 - 1) * 127).astype(np.int8)
        a_gold = a8.astype(np.float32).view(np.complex64)
        if transpose:
            a_gold = H(a_gold)
        # Note: np.matmul seems to be slow and inaccurate when there are batch dims
        c_gold = np.matmul(a_gold, H(a_gold))
        triu = np.triu_indices(shape[-2] if not transpose else shape[-1], 1)
        c_gold[..., triu[0], triu[1]] = 0
        a = a8.view(bf.DataType.ci8)
        a = bf.asarray(a, space='cuda')
        if transpose:
            a = H(a)
        c = bf.zeros_like(c_gold, space='cuda')
        self.linalg.matmul(1, a, None, 0, c)
        c = c.copy('system')
        np.testing.assert_allclose(c, c_gold, RTOL, ATOL)
Project: bifrost    Author: ledatelescope
def run_test_matmul_aa_dtype_shape(self, shape, dtype, axes=None, conj=False):
        a = ((np.random.random(size=shape)) * 127).astype(dtype)
        if axes is None:
            axes = range(len(shape))
        aa = a.transpose(axes)
        if conj:
            aa = aa.conj()
        c_gold = np.matmul(aa, H(aa))
        triu = np.triu_indices(shape[axes[-2]], 1)
        c_gold[..., triu[0], triu[1]] = 0
        a = bf.asarray(a, space='cuda')
        aa = a.transpose(axes)
        if conj:
            aa = aa.conj()
        c = bf.zeros_like(c_gold, space='cuda')
        self.linalg.matmul(1, aa, None, 0, c)
        c = c.copy('system')
        np.testing.assert_allclose(c, c_gold, RTOL, ATOL)
Project: bifrost    Author: ledatelescope
def run_test_matmul_ab_ci8_shape(self, shape, k, transpose=False):
        ashape_complex = shape[:-2] + (shape[-2], k * 2)
        bshape_complex = shape[:-2] + (k, shape[-1] * 2)
        a8 = (np.random.random(size=ashape_complex) * 255).astype(np.int8)
        b8 = (np.random.random(size=bshape_complex) * 255).astype(np.int8)
        a_gold = a8.astype(np.float32).view(np.complex64)
        b_gold = b8.astype(np.float32).view(np.complex64)
        if transpose:
            a_gold, b_gold = H(b_gold), H(a_gold)
        c_gold = np.matmul(a_gold, b_gold)
        a = a8.view(bf.DataType.ci8)
        b = b8.view(bf.DataType.ci8)
        a = bf.asarray(a, space='cuda')
        b = bf.asarray(b, space='cuda')
        if transpose:
            a, b = H(b), H(a)
        c = bf.zeros_like(c_gold, space='cuda')
        self.linalg.matmul(1, a, b, 0, c)
        c = c.copy('system')
        np.testing.assert_allclose(c, c_gold, RTOL, ATOL)
Project: bifrost    Author: ledatelescope
def run_benchmark_matmul_aa_correlator_kernel(self, ntime, nstand, nchan):
        x_shape = (ntime, nchan, nstand*2)
        perm = [1,0,2]
        x8 = ((np.random.random(size=x_shape+(2,))*2-1)*127).astype(np.int8)
        x = x8.astype(np.float32).view(np.complex64).reshape(x_shape)
        x = x.transpose(perm)
        b_gold = np.matmul(H(x[:,[0],:]), x[:,[0],:])
        triu = np.triu_indices(x_shape[-1], 1)
        b_gold[..., triu[0], triu[1]] = 0
        x = x8.view(bf.DataType.ci8).reshape(x_shape)
        x = bf.asarray(x, space='cuda')
        x = x.transpose(perm)
        b = bf.zeros_like(b_gold, space='cuda')
        bf.device.stream_synchronize();
        t0 = time.time()
        nrep = 200
        for _ in range(nrep):
            self.linalg.matmul(1, None, x, 0, b)
        bf.device.stream_synchronize();
        dt = time.time() - t0
        nflop = nrep * nchan * ntime * nstand*(nstand+1)/2 * 2*2 * 8
        print(nstand, '\t', nflop / dt / 1e9, 'GFLOP/s')
        print('\t\t', nrep*ntime*nchan / dt / 1e6, 'MHz')
Project: image-text-matching    Author: llltttppp
def select_negtive(self, i_feat, s_feat, sess, topN=50):
    '''
    Select the triplets with the largest losses \n
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
    '''
    feed_dict = {self.image_feat: i_feat, self.sentence_feat:s_feat}
    i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feed_dict)
    S = np.matmul(i_embed, s_embed.T)
    i_feat_pos = i_feat.repeat(topN, axis=0)
    s_feat_pos = s_feat.repeat(topN, axis=0)
    N = S.shape[0]
    np.fill_diagonal(S, -2*np.ones(N))
    neg_s_idx = S.argsort(axis=1)[:, -topN:]
    neg_i_idx = S.argsort(axis=0)[-topN:, :]
    s_feat_neg = s_feat[neg_s_idx.flatten('C')]
    i_feat_neg = i_feat[neg_i_idx.flatten('F')]
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
Project: image-text-matching    Author: llltttppp
def top_K_loss(self, sentence, image, K=30, margin=0.5):
    sim_matrix = tf.matmul(sentence, image, transpose_b=True)
    s_square = tf.reduce_sum(tf.square(sentence), axis=1)
    im_square = tf.reduce_sum(tf.square(image), axis=1)
    d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
    positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
    length = tf.shape(d)[-1]
    d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
    sen_loss_K ,_ = tf.nn.top_k(-1.0 * d, K, sorted=False) # note: this is negative value
    im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d), K, sorted=False) # note: this is negative value
    sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
    image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
    self.d_neg = (sen_loss_K + im_loss_K)/-2.0
    self.d_pos = positive
    self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
    self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
    self.endpoint['debug/d_Matrix'] = d
    self.endpoint['debug/positive'] = positive
    self.endpoint['debug/s_center_loss'] = sentence_center_loss
    self.endpoint['debug/i_center_loss'] = image_center_loss
    self.endpoint['debug/S'] = sim_matrix
    self.endpoint['debug/sentence_square'] = s_square
    self.endpoint['debug/image_square'] = im_square
    return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Project: image-text-matching    Author: llltttppp
def select_negtive(self, i_feat, s_feat, sess, topN=50):
    '''
    Select the triplets with the largest losses \n
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
    '''
    feed_dict = {self.image_feat: i_feat, self.sentence_feat:s_feat}
    i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feed_dict)
    S = np.matmul(i_embed, s_embed.T)
    i_feat_pos = i_feat.repeat(topN, axis=0)
    s_feat_pos = s_feat.repeat(topN, axis=0)
    N = S.shape[0]
    np.fill_diagonal(S, -2*np.ones(N))
    neg_s_idx = S.argsort(axis=1)[:, -topN:]
    neg_i_idx = S.argsort(axis=0)[-topN:, :]
    s_feat_neg = s_feat[neg_s_idx.flatten('C')]
    i_feat_neg = i_feat[neg_i_idx.flatten('F')]
    return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
Project: image-text-matching    Author: llltttppp
def top_K_loss(self, sentence, image, K=30, margin=0.5):
    sim_matrix = tf.matmul(sentence, image, transpose_b=True)
    s_square = tf.reduce_sum(tf.square(sentence), axis=1)
    im_square = tf.reduce_sum(tf.square(image), axis=1)
    d = tf.reshape(s_square,[-1,1]) - 2 * sim_matrix + tf.reshape(im_square, [1, -1])
    positive = tf.stack([tf.matrix_diag_part(d)] * K, axis=1)
    length = tf.shape(d)[-1]
    d = tf.matrix_set_diag(d, 8 * tf.ones([length]))
    sen_loss_K ,_ = tf.nn.top_k(-1.0 * d, K, sorted=False) # note: this is negative value
    im_loss_K,_ = tf.nn.top_k(tf.transpose(-1.0 * d), K, sorted=False) # note: this is negative value
    sentence_center_loss = tf.nn.relu(positive + sen_loss_K + margin)
    image_center_loss = tf.nn.relu(positive + im_loss_K + margin)
    self.d_neg = (sen_loss_K + im_loss_K)/-2.0
    self.d_pos = positive
    self.endpoint['debug/im_loss_topK'] = -1.0 * im_loss_K
    self.endpoint['debug/sen_loss_topK'] = -1.0 * sen_loss_K 
    self.endpoint['debug/d_Matrix'] = d
    self.endpoint['debug/positive'] = positive
    self.endpoint['debug/s_center_loss'] = sentence_center_loss
    self.endpoint['debug/i_center_loss'] = image_center_loss
    self.endpoint['debug/S'] = sim_matrix
    self.endpoint['debug/sentence_square'] = s_square
    self.endpoint['debug/image_square'] = im_square
    return tf.reduce_sum(sentence_center_loss), tf.reduce_sum(image_center_loss)
Project: deepmodels    Author: learningsociety
def conv_feat_map_tensor_gram(conv_fmap_tensor):
  """Compute Gram matrix of conv feature maps.

  Used in style transfer.
  """
  tf.assert_equal(tf.rank(conv_fmap_tensor), 4)
  shape = tf.shape(conv_fmap_tensor)
  num_images = shape[0]
  width = shape[1]
  height = shape[2]
  num_filters = shape[3]
  filters = tf.reshape(conv_fmap_tensor,
                       tf.stack([num_images, -1, num_filters]))
  grams = tf.matmul(
      filters, filters,
      transpose_a=True) / tf.to_float(width * height * num_filters)
  return grams
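For comparison, the same Gram computation can be written with numpy.matmul on a batch of feature maps; a sketch with assumed N x H x W x C shapes and random data:

import numpy as np

fmaps = np.random.rand(2, 8, 8, 16).astype(np.float32)   # N x H x W x C
n, h, w, c = fmaps.shape
filters = fmaps.reshape(n, h * w, c)
grams = np.matmul(filters.transpose(0, 2, 1), filters) / float(h * w * c)
print(grams.shape)                                       # (2, 16, 16)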
Project: aiida-fleur    Author: broeder-j
def abs_to_rel_f(vector, cell, pbc):
    """
    Converts a position vector in absolute coordinates to relative coordinates
    for a film system.
    """
    # TODO this currently only works if the z-coordinate is the one with no pbc
    # Therefore if a structure with x non pbc is given this should also work.
    # maybe write a 'transform film to fleur_film routine'?
    if len(vector) == 3:
        if pbc[2] == False:
            # leave z coordinate absolut
            # convert only x and y.
            postionR =  np.array(vector)
            postionR_f =  np.array(postionR[:2])
            cell_np = np.array(cell)
            cell_np = np.array(cell_np[0:2, 0:2])
            inv_cell_np = np.linalg.inv(cell_np)
            new_xy = [i for i in np.matmul(postionR_f, inv_cell_np)]#np.matmul(inv_cell_np, postionR_f)]
            new_rel_pos_f = [new_xy[0], new_xy[1], postionR[2]]
            return new_rel_pos_f
        else:
            print('FLEUR can not handle this type of film coordinate')
    else:
        return False
Project: aiida-fleur    Author: broeder-j
def rel_to_abs_f(vector, cell):
    """
    Converts a position vector in internal coordinates to absolute coordinates
    in Angstroem for a film structure (2D).
    """
    # TODO this currently only works if the z-coordinate is the one with no pbc
    # Therefore if a structure with x non pbc is given this should also work.
    # maybe write a 'transform film to fleur_film routine'?
    if len(vector) == 3:
        postionR =  np.array(vector)
        postionR_f =  np.array(postionR[:2])
        #print postionR_f
        cell_np = np.array(cell)
        cell_np = np.array(cell_np[0:2, 0:2])
        #print cell_np
        new_xy = [i for i in np.matmul(postionR_f, cell_np)]
        new_abs_pos_f = [new_xy[0], new_xy[1], postionR[2]]
        return new_abs_pos_f
    else:
        return False
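A hypothetical usage sketch of rel_to_abs_f above, with an assumed 4 x 4 x 20 Angstroem film cell (rows are lattice vectors): the in-plane fractional coordinates are multiplied by the 2 x 2 in-plane cell and the z coordinate is passed through unchanged.

import numpy as np

cell = np.array([[4.0, 0.0, 0.0],
                 [0.0, 4.0, 0.0],
                 [0.0, 0.0, 20.0]])
print(rel_to_abs_f([0.5, 0.25, 7.3], cell))   # [2.0, 1.0, 7.3]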
Project: krpcScripts    Author: jwvanderbeck
def test_exceptions(self):
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1)),               # scalar vector
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)
Project: krpcScripts    Author: jwvanderbeck
def test_shapes(self):
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))

        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())
Project: krpcScripts    Author: jwvanderbeck
def test_numpy_ufunc_override(self):
        # 2016-01-29: NUMPY_UFUNC_DISABLED
        return

        class A(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A([1, 2])
        b = B([1, 2])
        c = np.ones(2)
        assert_equal(self.matmul(a, b), "A")
        assert_equal(self.matmul(b, a), "A")
        assert_raises(TypeError, self.matmul, b, c)
Project: vulk    Author: realitix
def mul(self, matrix):
        '''Multiply this matrix by `matrix`
        The order of operation is: `this @ matrix`.

        *Parameters:*

        - `matrix`: `Matrix4`
        '''
        # Reshape the flat values into 4x4 matrices for np.matmul
        view1 = np.reshape(self._values, (4, 4))
        view2 = np.reshape(matrix.values, (4, 4))
        self.tmp.shape = (4, 4)

        # np.matmul(view2, view1, out=out)
        np.matmul(view2, view1, out=self.tmp)

        self.tmp.shape = (16,)
        self._values[:] = self.tmp

        return self
Project: Sohu-LuckData-Image-Text-Matching-Competition    Author: WeitaoVan
def select_negtive(self, i_feat, s_feat, sess, topN=50):
        '''
        Select the triplets with the largest losses \n
        return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
        '''
        feed_dict = {self.image_feat: i_feat, self.sentence_feat:s_feat}
        i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feed_dict)
        S = np.matmul(i_embed, s_embed.T)
        i_feat_pos = i_feat.repeat(topN, axis=0)
        s_feat_pos = s_feat.repeat(topN, axis=0)
        N = S.shape[0]
        np.fill_diagonal(S, -2*np.ones(N))
        neg_s_idx = S.argsort(axis=1)[:, -topN:]
        neg_i_idx = S.argsort(axis=0)[-topN:, :]
        s_feat_neg = s_feat[neg_s_idx.flatten('C')]
        i_feat_neg = i_feat[neg_i_idx.flatten('F')]
        return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
Project: Sohu-LuckData-Image-Text-Matching-Competition    Author: WeitaoVan
def select_negtive(self, i_feat, s_feat, sess, topN=50):
        '''
        Select the triplets with the largest losses \n
        return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
        '''
        feed_dict = {self.image_feat: i_feat, self.sentence_feat:s_feat}
        i_embed, s_embed = sess.run([self.image_fc2, self.sentence_fc2], feed_dict=feed_dict)
        S = np.matmul(i_embed, s_embed.T)
        i_feat_pos = i_feat.repeat(topN, axis=0)
        s_feat_pos = s_feat.repeat(topN, axis=0)
        N = S.shape[0]
        np.fill_diagonal(S, -2*np.ones(N))
        neg_s_idx = S.argsort(axis=1)[:, -topN:]
        neg_i_idx = S.argsort(axis=0)[-topN:, :]
        s_feat_neg = s_feat[neg_s_idx.flatten('C')]
        i_feat_neg = i_feat[neg_i_idx.flatten('F')]
        return i_feat_pos, s_feat_pos, i_feat_neg, s_feat_neg
Project: robotics1project    Author: pchorak
def get_xyz(interface, xyz_from_camera):
    angles = interface.current_status.angles[0:3]

    # Get current XYZ
    P0t = DobotModel.forward_kinematics(angles)

    # Getting Desired XYZ of end effector
    Pct = np.array(CAMERA_OFFSET)
    R0t = DobotModel.R0T(angles)
    Rtc = np.array([[0, 1, 0], [1, 0, 0], [0, 0, -1]])
    R0c = np.matmul(R0t, Rtc)

    Pta = np.matmul(R0c, xyz_from_camera) - np.matmul(R0c, Pct)
    target = np.reshape(Pta, (3, 1)) + np.reshape(P0t, (3, 1))
    return target


# FUNCTION: Touch - Place the end effector on top of an AR tag
# AR TAGS: DUCKY = 0  DUCKYBOT = 1   OBSTACLE = 2
Project: paradox    Author: ictxiangxin
def __compute_valid_convolution_nd(data, kernel, dimension: int):
    convolution_shape = tuple(data.shape[i] - kernel.shape[i] + 1 for i in range(-1, -dimension - 1, -1))
    list_dimension = reduce(lambda a, b: a * b, convolution_shape)
    data_prefix = data.shape[:-dimension]
    kernel_flat = kernel.ravel()
    data_flat = numpy.zeros(data_prefix + (list_dimension, len(kernel_flat)))
    for i in range(list_dimension):
        tensor_slice_start = [0] * len(kernel.shape)
        tensor_slice = [slice(None)] * len(data.shape)
        tensor_slice_start[-1] = i
        for r in range(-1, -len(kernel.shape) - 1, -1):
            dimension_scale = data.shape[r] - kernel.shape[r] + 1
            if tensor_slice_start[r] >= dimension_scale:
                tensor_slice_start[r + 1] = tensor_slice_start[r] // dimension_scale
                tensor_slice_start[r] %= dimension_scale
            tensor_slice[r] = slice(tensor_slice_start[r], tensor_slice_start[r] + kernel.shape[r])
        sub_convolution_index = (slice(None),) * (len(data.shape) - dimension) + tuple([i, slice(None)])
        data_flat[sub_convolution_index] = data[tensor_slice].reshape(data_prefix + (reduce(lambda a, b: a * b, kernel.shape),))
    convolution_flat = numpy.matmul(data_flat, numpy.flip(kernel_flat, axis=0))
    convolution_nd = convolution_flat.reshape(data_prefix + convolution_shape)
    return convolution_nd
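The im2col-style trick used here (unroll sliding windows, then one matmul against the flipped kernel) can be verified in 1-D against numpy.convolve; a minimal sketch:

import numpy as np

a = np.arange(6, dtype=float)
k = np.array([1.0, -2.0, 0.5])
windows = np.stack([a[i:i + len(k)] for i in range(len(a) - len(k) + 1)])
via_matmul = np.matmul(windows, k[::-1])      # flip the kernel for true convolution
assert np.allclose(via_matmul, np.convolve(a, k, mode='valid'))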
Project: Aurora    Author: upul
def test_matmul_two_vars():
    x2 = ad.Variable(name='x2')
    x3 = ad.Variable(name='x3')
    y = ad.matmul(x2, x3)

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = np.array([[1, 2], [3, 4], [5, 6]])  # 3x2
    x3_val = np.array([[7, 8, 9], [10, 11, 12]])  # 2x3

    y_val, grad_x2_val, grad_x3_val = executor.run(feed_shapes={x2: x2_val, x3: x3_val})

    expected_yval = np.matmul(x2_val, x3_val)
    expected_grad_x2_val = np.matmul(np.ones_like(expected_yval), np.transpose(x3_val))
    expected_grad_x3_val = np.matmul(np.transpose(x2_val), np.ones_like(expected_yval))

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, expected_yval)
    assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    assert np.array_equal(grad_x3_val, expected_grad_x3_val)
Project: Aurora    Author: upul
def test_matmul_var_and_param():
    x2 = ad.Variable(name="x2")
    w2_val = np.array([[7, 8, 9], [10, 11, 12]])  # 2x3
    w2 = ad.Parameter(name="w2", init=w2_val)
    y = ad.matmul(x2, w2)

    grad_x2, grad_w2 = ad.gradients(y, [x2, w2])

    executor = ad.Executor([y, grad_x2, grad_w2])
    x2_val = np.array([[1, 2], [3, 4], [5, 6]])  # 3x2

    y_val, grad_x2_val, grad_w2_val = executor.run(feed_shapes={x2: x2_val})

    expected_yval = np.matmul(x2_val, w2_val)
    expected_grad_x2_val = np.matmul(np.ones_like(expected_yval), np.transpose(w2_val))
    expected_grad_x3_val = np.matmul(np.transpose(x2_val), np.ones_like(expected_yval))

    assert isinstance(y, ad.Node)
    # assert np.array_equal(y_val, expected_yval)
    # assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    # assert np.array_equal(grad_w2_val, expected_grad_x3_val)
Project: Sisyphus    Author: davidbrandfonbrener
def output_step_scan(self, dummy, new_state):

        if self.dale_ratio:
            new_output = tf.matmul(
                            tf.nn.relu(new_state),
                            tf.matmul(
                                tf.abs(self.W_out) * self.output_Connectivity,
                                self.Dale_out,
                                name="in_2"),
                            transpose_b=True, name="3")\
                         + self.b_out

        else:
            new_output = tf.matmul(tf.nn.relu(new_state), self.W_out * self.output_Connectivity,
                                   transpose_b=True, name="3") + self.b_out

        return new_output
Project: learning-rank-public    Author: andreweskeclarke
def gradient(x0, X, y, alpha):
    # gradient of the logistic loss

    w, c = x0[1:137], x0[0]

    #print("c is " + str(c))
    z = X.dot(w) + c
    z = phi(y * z)
    z0 = (z - 1) * y
    grad_w = np.matmul(z0,X) / X.shape[0] + alpha * w
    grad_c = z0.sum() / X.shape[0]

    grad_c = np.array(grad_c)
    #print(grad_w[0,1:5])
    return np.c_[([grad_c], grad_w)]
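phi is not defined in this snippet; the formula above matches the gradient of the logistic loss with labels y in {-1, +1} if phi is the logistic sigmoid. A sketch of that assumed helper:

import numpy as np

def phi(t):
    # logistic sigmoid, so (phi(y*z) - 1) * y is d/dz of log(1 + exp(-y*z))
    return 1. / (1. + np.exp(-t))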


##### Stochastic Gradient Descent Optimiser ######
Project: learning-rank-public    Author: andreweskeclarke
def gradient(x0, X, y, alpha):
    # gradient of the logistic loss

    w, c = x0[1:137], x0[0]

    #print("c is " + str(c))
    z = X.dot(w) + c
    z = phi(y * z)
    z0 = (z - 1) * y
    grad_w = np.matmul(z0,X) / X.shape[0] + alpha * w
    grad_c = z0.sum() / X.shape[0]

    grad_c = np.array(grad_c)
    #print(grad_w[0,1:5])
    return np.c_[([grad_c], grad_w)]


##### Stochastic Gradient Descent Optimiser ######
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_exceptions(self):
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1)),               # scalar vector
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_shapes(self):
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))

        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_numpy_ufunc_override(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        return

        class A(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A([1, 2])
        b = B([1, 2])
        c = np.ones(2)
        assert_equal(self.matmul(a, b), "A")
        assert_equal(self.matmul(b, a), "A")
        assert_raises(TypeError, self.matmul, b, c)
Project: pyMHT    Author: erikliland
def precalc(C, R, x_bar_list, P_bar_list):
    assert C.ndim == 2
    assert R.ndim == 2

    nMeasurement, nStates = x_bar_list.shape
    nObservableState = C.shape[0]

    z_hat_list = C.dot(x_bar_list.T).T
    S_list = np.matmul(np.matmul(C, P_bar_list), C.T) + R
    S_inv_list = np.linalg.inv(S_list)
    K_list = np.matmul(np.matmul(P_bar_list, C.T), S_inv_list)
    P_hat_list = P_bar_list - np.matmul(K_list.dot(C), P_bar_list)

    assert z_hat_list.shape == (nMeasurement, nObservableState), "z_hat ERROR"
    assert S_list.shape == (nMeasurement, nObservableState, nObservableState), "S ERROR"
    assert S_inv_list.shape == S_list.shape, "S_inv ERROR"
    assert K_list.shape == (nMeasurement, nStates, nObservableState)
    assert P_hat_list.shape == P_bar_list.shape, "P_hat ERROR"

    return z_hat_list, S_list, S_inv_list, K_list, P_hat_list
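A hypothetical usage sketch with random inputs, following the shapes asserted above (C: nObs x nStates, R: nObs x nObs, x_bar_list: nMeasurement x nStates, P_bar_list: nMeasurement x nStates x nStates):

import numpy as np

nMeas, nStates, nObs = 4, 6, 2
rng = np.random.default_rng(0)
C = rng.standard_normal((nObs, nStates))
R = 0.1 * np.eye(nObs)
x_bar_list = rng.standard_normal((nMeas, nStates))
A = rng.standard_normal((nMeas, nStates, nStates))
P_bar_list = np.matmul(A, A.transpose(0, 2, 1)) + np.eye(nStates)   # one SPD covariance per measurement

z_hat, S, S_inv, K, P_hat = precalc(C, R, x_bar_list, P_bar_list)
print(K.shape)   # (4, 6, 2): one Kalman gain per measurement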
Project: information-dropout    Author: ucla-vision
def correlation(task,load=True):
    self = mytask
    if load:
        self.initialize(_load=True, _logging=False, _log_dir='other/')
    data = []
    for batch in self.iterate_minibatches('valid'):
        xtrain, ytrain = batch
        ytrain = np.eye(10)[ytrain]
        feed_dict = {self.x: xtrain, self.y: ytrain, self.sigma0: 1., self.initial_keep_prob: task['initial_keep_prob'],  self.is_training: False}
        z = tf.get_collection('log_network')[-1]
        batch_z = self.sess.run( z, feed_dict)
        data.append(batch_z)
    data = np.vstack(data)
    data = data.reshape(data.shape[0],-1)
    def normal_tc(c0):
        c1i = np.diag(1./np.diag(c0))
        p = np.matmul(c1i,c0)
        return - .5 * np.linalg.slogdet(p)[1] / c0.shape[0]
    c0 = np.cov( data, rowvar=False )
    tc = normal_tc(c0)
    print("Total correlation: %f" % tc)
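The nested normal_tc helper computes the total correlation of a Gaussian fit, essentially -1/2 log det of the correlation matrix normalized by the dimension; a standalone sanity-check sketch:

import numpy as np

def normal_tc(c0):
    c1i = np.diag(1. / np.diag(c0))
    p = np.matmul(c1i, c0)
    return -.5 * np.linalg.slogdet(p)[1] / c0.shape[0]

print(normal_tc(np.eye(3)) == 0.0)                        # True: independent coordinates
print(normal_tc(np.array([[1.0, 0.9], [0.9, 1.0]])) > 0)  # True: correlation makes it positive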
Project: aws-lambda-numpy    Author: vitolimandibhrata
def test_exceptions(self):
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1)),               # scalar vector
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)