Python numpy module: mat() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how numpy.mat() is used.
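
Before the project examples, here is a minimal standalone sketch of numpy.mat itself: it wraps an array-like (or a MATLAB-style string) in a numpy.matrix, for which * means matrix multiplication and .T / .I give the transpose and inverse. Note that NumPy now recommends plain ndarrays with the @ operator over numpy.matrix.

import numpy as np

A = np.mat('1 2; 3 4')            # MATLAB-style string literal -> 2x2 matrix
x = np.mat([[1.0], [2.0]])        # nested list -> 2x1 column matrix
print(A * x)                      # '*' is matrix multiplication for np.matrix
print(A.T)                        # transpose
print(A.I)                        # inverse
print(np.asarray(A))              # convert back to a plain ndarray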

Project: invo    Author: rafidrm    | Project source | File source
def FOP(self, A, b):
        """ Create a forward optimization problem.

        Args:
            A (matrix): numpy matrix of shape :math:`m \\times n`.
            b (matrix): numpy matrix of shape :math:`m \\times 1`.

        Currently, the forward problem is constructed by the user supplying a
        constraint matrix ``A`` and vector ``b``. The forward problem is

        .. math::

            \min_{\mathbf{x}} \quad&\mathbf{c'x}

            \\text{s.t} \quad&\mathbf{A x \geq b}
        """
        #self.A = np.mat(A)
        #self.b = np.mat(b)
        self.A, self.b = validateFOP(A, b)
        self._fop = True
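
The invo solver pipeline (validateFOP and the inverse-optimization steps) is not shown in this snippet; purely to ground the forward problem described in the docstring, min c'x subject to A x >= b, here is a tiny instance with made-up numbers solved via scipy.optimize.linprog (a sketch, not the project's own API):

import numpy as np
from scipy.optimize import linprog

A = np.mat([[1.0, 1.0],
            [1.0, 0.0]])               # shape (2, 2): m x n constraint matrix
b = np.mat([[1.0], [0.2]])             # shape (2, 1): m x 1 right-hand side
c = np.array([1.0, 2.0])

# linprog expects "<=" constraints, so flip the sign of A and b
res = linprog(c, A_ub=-np.asarray(A), b_ub=-np.asarray(b).ravel())
print(res.x)                           # roughly [1.0, 0.0]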
Project: invo    Author: rafidrm    | Project source | File source
def FOP(self, A, b):
        """ Create a forward optimization problem.

        Args:
            A (matrix): numpy matrix of shape :math:`m \\times n`.
            b (matrix): numpy matrix of shape :math:`m \\times 1`.

        Currently, the forward problem is constructed by the user supplying a
        constraint matrix `A` and vector `b`. The forward problem is

        .. math::

            \min_{\mathbf{x}} \quad&\mathbf{c'x}

            \\text{s.t} \quad&\mathbf{A x \geq b}
        """
        #self.A = np.mat(A)
        #self.b = np.mat(b)
        self.A, self.b = validateFOP(A, b)
        self._fop = True
Project: invo    Author: rafidrm    | Project source | File source
def rho(self, points):
        """ Solves the goodness of fit.
        """
        assert self._solved, 'you need to solve first.'

        m, n = self.A.shape
        projections = self.optimal_points(points)
        _pts = [np.mat(pt).T for pt in points]
        numer = [
            np.linalg.norm(pj - pt, self.p)
            for pj, pt in zip(projections, _pts)
        ]
        numer = sum(numer)
        denom = 0
        for i in range(m):
            ai = self.A[i]
            bi = self.b[i]
            result = self._project_to_hyperplane(points, ai, bi)
            denom += result
        rho = 1 - numer / denom
        return rho
Project: invo    Author: rafidrm    | Project source | File source
def FOP(self, A, b):
        """ Create a forward optimization problem.

        Args:
            A (matrix): numpy matrix of shape :math:`m \\times n`.
            b (matrix): numpy matrix of shape :math:`m \\times 1`.

        Currently, the forward problem is constructed by the user supplying a
        constraint matrix ``A`` and vector ``b``. The forward problem is

        .. math::

            \min_{\mathbf{x}} \quad&\mathbf{c'x}

            \\text{s.t} \quad&\mathbf{A x \geq b}
        """
        #self.A = np.mat(A)
        #self.b = np.mat(b)
        self.A, self.b = validateFOP(A, b)
        self._fop = True
Project: OCR    Author: OrangeGuo    | Project source | File source
def train(self, training_data_array):
        for data in training_data_array:
            # forward propagation: hidden-layer and output-layer activations
            y1 = np.dot(np.mat(self.theta1), np.mat(data.y0).T)
            sum1 = y1 + np.mat(self.input_layer_bias)
            y1 = self.sigmoid(sum1)

            y2 = np.dot(np.array(self.theta2), y1)
            y2 = np.add(y2, self.hidden_layer_bias)
            y2 = self.sigmoid(y2)

            # compute the output-layer and hidden-layer errors
            actual_vals = [0] * 10
            actual_vals[data.label] = 1
            output_errors = np.mat(actual_vals).T - np.mat(y2)
            hidden_errors = np.multiply(np.dot(np.mat(self.theta2).T, output_errors), self.sigmoid_prime(sum1))

            # update the weights and biases
            self.theta1 += self.LEARNING_RATE * np.dot(np.mat(hidden_errors), np.mat(data.y0))
            self.theta2 += self.LEARNING_RATE * np.dot(np.mat(output_errors), np.mat(y1).T)
            self.hidden_layer_bias += self.LEARNING_RATE * output_errors
            self.input_layer_bias += self.LEARNING_RATE * hidden_errors
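
The shapes involved in the forward step above can be seen with a tiny standalone sketch; the 400-input / 20-hidden sizes below are assumptions for illustration, not taken from the project:

import numpy as np

theta1 = np.random.rand(20, 400)           # hidden-layer weights (assumed sizes)
y0 = np.random.rand(400)                   # one flattened input sample
y1 = np.dot(np.mat(theta1), np.mat(y0).T)  # (20, 400) x (400, 1) -> (20, 1)
print(y1.shape)                            # np.mat(y0).T turns the 1-D sample into a column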
Project: watermark    Author: lishuaijuly    | Project source | File source
def _gene_signature(self,wm,size,key):
        '''Generate a binary signature from the watermark image via SVD.'''
        wm = cv2.resize(wm,(size,size))        
        wU,_,wV = np.linalg.svd(np.mat(wm))


        sumU = np.sum(np.array(wU),axis=0)
        sumV = np.sum(np.array(wV),axis=0)

        sumU_mid = np.median(sumU)
        sumV_mid = np.median(sumV)

        sumU=np.array([1 if sumU[i] >sumU_mid else 0 for i in range(len(sumU)) ])
        sumV=np.array([1 if sumV[i] >sumV_mid else 0 for i in range(len(sumV)) ])

        uv_xor=np.logical_xor(sumU,sumV)

        np.random.seed(key)
        seq=np.random.randint(2,size=len(uv_xor))

        signature = np.logical_xor(uv_xor, seq)

        sqrts = int(np.sqrt(size))
        return np.array(signature,dtype=np.int8).reshape((sqrts,sqrts))
Project: watermark    Author: lishuaijuly    | Project source | File source
def _gene_signature(self,wm,key):
        '''Generate a binary signature from the watermark image via SVD.'''
        wm = cv2.resize(wm,(256,256))        
        wU,_,wV = np.linalg.svd(np.mat(wm))


        sumU = np.sum(np.array(wU),axis=0)
        sumV = np.sum(np.array(wV),axis=0)

        sumU_mid = np.median(sumU)
        sumV_mid = np.median(sumV)

        sumU=np.array([1 if sumU[i] >sumU_mid else 0 for i in range(len(sumU)) ])
        sumV=np.array([1 if sumV[i] >sumV_mid else 0 for i in range(len(sumV)) ])

        uv_xor=np.logical_xor(sumU,sumV)

        np.random.seed(key)
        seq=np.random.randint(2,size=len(uv_xor))

        signature = np.logical_xor(uv_xor, seq)
        return np.array(signature,dtype=np.int8)
Project: watermark    Author: lishuaijuly    | Project source | File source
def _extract_svd_sig(self,vec,siglen):
        Q = 32
        ext_sig=[]

        for i in range(0,vec.shape[0],8):  #128*128
            for j in range(0,vec.shape[1],8):
                u,s,v = np.linalg.svd(np.mat(vec[i:i+8,j:j+8]))
                z = s[0] % Q
                if z>=Q/2 :
                    ext_sig.append(1)                    
                else:
                    ext_sig.append(0)

        if siglen >len(ext_sig):
            logging.warning('extracted svd sig is {}, smaller than needed {}'.format(len(ext_sig),siglen))
            ext_sig.extend([0] * (siglen - len(ext_sig)))
        else:
            ext_sig = ext_sig[:siglen]

        return [ext_sig]


##################################################################################################################################
Project: hamming-stego    Author: DakotaNelson    | Project source | File source
def encode(msg):
    """ passed a list of bits (integers, 1 or 0), returns a hamming(8,4)-coded
        list of bits """
    while len(msg) % 4 != 0:
        # pad the message to length
        msg.append(0)

    msg = np.reshape(np.array(msg), (-1, 4))

    # create parity bits using transition matrix
    transition = np.mat('1,0,0,0,0,1,1,1;\
                         0,1,0,0,1,0,1,1;\
                         0,0,1,0,1,1,0,1;\
                         0,0,0,1,1,1,1,0')

    result =  np.dot(msg, transition)

    # mod 2 the matrix multiplication
    return np.mod(result, 2)
Project: hamming-stego    Author: DakotaNelson    | Project source | File source
def syndrome(msg):
    """ passed a list of hamming(8,4)-encoded bits (integers, 1 or 0),
        returns an error syndrome for that list """

    msg = np.reshape(np.array(msg), (-1, 8)).T

    # syndrome generation matrix
    transition = np.mat('0,1,1,1,1,0,0,0;\
                         1,0,1,1,0,1,0,0;\
                         1,1,0,1,0,0,1,0;\
                         1,1,1,0,0,0,0,1')

    result = np.dot(transition, msg)

    # mod 2 the matrix multiplication
    return np.mod(result, 2)
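
A quick round trip through the two functions above (a sketch; it only needs numpy): a freshly encoded block yields an all-zero syndrome, and flipping a single bit makes the syndrome non-zero.

codeword = encode([1, 0, 1, 1])              # one 4-bit block -> 8 coded bits
print(syndrome(codeword.tolist()[0]))        # all zeros: no error detected

corrupted = codeword.tolist()[0]
corrupted[2] ^= 1                            # flip a single bit
print(syndrome(corrupted))                   # non-zero syndrome flags the error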
Project: PPRE    Author: MaoYuwei    | Project source | File source
def PatPatSimilarity(p, c):
    # compute the cosine similarity between two 'bet' pattern vectors

    # debug prints
    # print 'p', p
    # print 'c', c
    # print 'p len ', len(p)
    # print 'c len ', len(c[1])
    A = np.mat(p[1:])
    B = np.mat(c[1:])
    # A = np.mat(p)
    # B = np.mat(c[1])

    num = A * B.T
    denom = np.linalg.norm(A) * np.linalg.norm(B)
    cos = num / denom  # cosine of the angle between the two vectors
    # sim = 0.5 + 0.5 * cos  # optional rescaling to [0, 1]
    return cos
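
The same np.mat-based cosine computation on two small hand-made vectors, as a standalone sketch (the leading element of p and c is skipped above, apparently because it holds a label rather than a feature):

import numpy as np

A = np.mat([1.0, 0.0, 1.0])
B = np.mat([1.0, 1.0, 0.0])
cos = (A * B.T) / (np.linalg.norm(A) * np.linalg.norm(B))
print(cos)            # [[0.5]] -- a 1x1 matrix, like the value returned above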
Project: Spherical-robot    Author: Evan-Zhao    | Project source | File source
def tdoa_to_position(time_diff, sensor_pos):
    sensors = len(time_diff)
    if len(time_diff) != len(sensor_pos):
        raise Exception('Channel number mismatch.')

    dist_diff = []
    for x in time_diff:
        dist_diff.append(x * sound_speed)

    inhom_mat = np.mat(np.zeros([sensors - 2, 1]))
    coeff_mat = np.mat(np.zeros([sensors - 2, 3]))
    for i in range(2, sensors):
        args = dist_diff[1], dist_diff[i], \
               sensor_pos[0], sensor_pos[1], sensor_pos[i]
        coeff_mat[i - 2, :] = coeff(*args)
        inhom_mat[i - 2] = -inhom(*args)

    x_sol = lin.pinv(coeff_mat) * inhom_mat
    return x_sol[0, 0], x_sol[1, 0], x_sol[2, 0]
Project: chat    Author: Decalogue    | Project source | File source
def jaccard_pinyin(pv1, pv2):
    """Similarity score between two pinyin vectors with jaccard.
    ?????????jaccard??????

    According to the semantic jaccard model to calculate the similarity.
    The similarity score interval for each two pinyin sentences was [0, 1].
    ????jaccard??????????????????????????[0, 1]?
    """
    sv_matrix = []
    sv_rows = []
    for pinyin1 in pv1:
        for pinyin2 in pv2:
            score = match_pinyin(pinyin1, pinyin2)
            sv_rows.append(score)
        sv_matrix.append(sv_rows)
        sv_rows = []
    matrix = mat(sv_matrix)
    result = sum_cosine(matrix, 0.7)
    total = result["total"]
    total_dif = result["total_dif"]
    num = result["num_not_match"]
    sim = total/(total + num*(1-total_dif))
    return sim
Project: RFHO    Author: lucfra    | Project source | File source
def load_iros15(folder=IROS15_BASE_FOLDER, resolution=15, legs='all', part_proportions=(.7, .2), one_hot=True,
                shuffle=True):
    resolutions = (5, 11, 15)
    legs_names = ('LF', 'LH', 'RF', 'RH')
    assert resolution in resolutions
    folder += str(resolution)
    if legs == 'all': legs = legs_names
    base_name_by_leg = lambda leg: os.path.join(folder, 'trainingSet%sx%sFromSensor%s.mat'
                                                % (resolution, resolution, leg))

    datasets = {}
    for _leg in legs:
        dat = scio.loadmat(base_name_by_leg(_leg))
        data, target = dat['X'], to_one_hot_enc(dat['Y']) if one_hot else dat['Y']
        # maybe pre-processing??? or it is already done? ask...
        datasets[_leg] = Datasets.from_list(
            redivide_data([Dataset(data, target, info={'leg': _leg})],
                          partition_proportions=part_proportions, shuffle=shuffle))
    return datasets
Project: 3D-IWGAN    Author: EdwardSmith1884    | Project source | File source
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    import numpy as np
    from math import factorial
    try:
        window_size = np.abs(np.int(window_size))
        order = np.abs(np.int(order))
    except ValueError, msg:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
    firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve( m[::-1], y, mode='valid')
Project: 3D-IWGAN    Author: EdwardSmith1884    | Project source | File source
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    import numpy as np
    from math import factorial
    try:
        window_size = np.abs(np.int(window_size))
        order = np.abs(np.int(order))
    except ValueError, msg:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
    firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve( m[::-1], y, mode='valid')
Project: 3D-IWGAN    Author: EdwardSmith1884    | Project source | File source
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    import numpy as np
    from math import factorial
    try:
        window_size = np.abs(np.int(window_size))
        order = np.abs(np.int(order))
    except ValueError, msg:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
    firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve( m[::-1], y, mode='valid')
Project: BOHP_RNN    Author: ThomasMiconi    | Project source | File source
def savitzky_golay(y, window_size, order, deriv=0, rate=1):


    try:
        window_size = np.abs(np.int(window_size))
        order = np.abs(np.int(order))
    except ValueError, msg:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order+1)
    half_window = (window_size -1) // 2
    # precompute coefficients
    b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
    lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve( m[::-1], y, mode='valid')
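
The np.mat call above builds the polynomial design matrix whose pseudo-inverse yields the Savitzky-Golay coefficients. A standalone sketch of just that step for a 5-point quadratic window, applied to a noisy sine (edge padding omitted for brevity):

import numpy as np

window_size, order = 5, 2
half_window = (window_size - 1) // 2
b = np.mat([[k ** i for i in range(order + 1)]
            for k in range(-half_window, half_window + 1)])   # 5x3 design matrix
m = np.linalg.pinv(b).A[0]                                     # smoothing (deriv=0) coefficients
print(m)                                                       # approx. [-0.0857  0.3429  0.4857  0.3429 -0.0857]

t = np.linspace(0, 2 * np.pi, 100)
noisy = np.sin(t) + 0.1 * np.random.randn(100)
smoothed = np.convolve(m[::-1], noisy, mode='same')            # rough smoothing without edge padding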
Project: forward    Author: yajun0601    | Project source | File source
def transcoding(x):
    l1 = list(x)
    province = list(set(l1))
    n = len(province)
    mat = [[0 for j in range(n)] for i in range(n)]
    province_dict = {}
    for i in range(n):
        mat[i][i] = 1
        province_dict[str(province[i])] = mat[i]
    ret = []
    for i in range(len(l1)):
        key = str(l1[i])
        ret.append(province_dict[key])
    return pd.DataFrame(ret),province_dict


#hot_coding,province_dict = transcoding(df[10])
Project: l1dbproto    Author: lsst-dm    | Project source | File source
def rotation_matrix(a, b):
    """
    Create rotation matrix to rotate vector a into b.

    After http://math.stackexchange.com/a/476311

    Parameters
    ----------
    a,b
        xyz-vectors
    """

    v = np.cross(a, b)
    sin = np.linalg.norm(v)
    if sin == 0:
        return np.identity(3)
    cos = np.vdot(a, b)
    vx = np.mat([[0, -v[2], v[1]], [v[2], 0., -v[0]], [-v[1], v[0], 0.]])

    R = np.identity(3) + vx + vx * vx * (1 - cos) / (sin ** 2)

    return R
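
A quick standalone check of rotation_matrix on two unit vectors (numpy only): the returned matrix should rotate a onto b.

import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
R = rotation_matrix(a, b)
print(np.allclose(R * np.mat(a).T, np.mat(b).T))   # True: R maps a onto b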
Project: GraphicalModelForRecommendation    Author: AlgorithmFan    | Project source | File source
def _update_parameters(self, factors0, factors1, ratings, factors_mu, factors_variance):
        """
        :param factors0:
        :param factors1:
        :param ratings:
        :param factors_mu:
        :param factors_variance:
        :return:
        """
        index = ratings.keys()

        QQ = 0
        RQ = 0
        for dim0, dim1 in index:
            Q = factors0[dim0, :] * factors1[dim1, :]
            QQ += np.mat(Q).transpose() * np.mat(Q)
            RQ += (ratings[dim0, dim1] - self.mean_rating) * Q
        sigma_inv = np.linalg.inv(factors_variance + self.rating_sigma * QQ)
        mu = sigma_inv * (np.dot(factors_variance, np.reshape(factors_mu, newshape=(factors_mu.shape[0], 1))) + self.rating_sigma * RQ)
        return np.random.multivariate_normal(mu, sigma_inv)
Project: GraphicalModelForRecommendation    Author: AlgorithmFan    | Project source | File source
def _update_time_parameters(self, user_factors, item_factors, time_factors, ratings, factors_mu, factors_variance, time_id):
        index = ratings.keys()
        QQ, RQ = 0.0, 0.0
        for dim0, dim1 in index:
            Q = user_factors[dim0, :] * item_factors[dim1, :]
            QQ += np.mat(Q).transpose() * np.mat(Q)
            RQ += (ratings[dim0, dim1] - self.mean_rating) * Q

        RQ = np.reshape(RQ, newshape=(RQ.shape[0], 1))
        if time_id == 0:
            mu = (time_factors[1, :] + factors_mu) / 2
            sigma_inv = np.linalg.inv(2 * factors_variance + self.rating_sigma * QQ)
        elif time_id == self.time_num-1:
            sigma_inv = np.linalg.inv(factors_variance + self.rating_sigma * QQ)
            Tk_1 = np.reshape(time_factors[self.time_num-2, :], newshape=(time_factors.shape[1], 1))
            mu = sigma_inv * (np.dot(factors_variance, Tk_1) + self.rating_sigma * RQ)
        else:
            sigma_inv = np.linalg.inv(2 * factors_variance + self.rating_sigma * QQ)
            Tk = time_factors[time_id-1, :] + time_factors[time_id+1, :]
            mu = sigma_inv * (np.dot(factors_variance, np.reshape(Tk, newshape=(Tk.shape[0], 1))) + self.rating_sigma * RQ)

        return np.random.multivariate_normal(mu, sigma_inv)
Project: joint-demosaicing-denoising-sem    Author: VLOGroup    | Project source | File source
def DCT(width, height, depth):
    N = width
    M = depth
    filtMtx = np.zeros((N*N*M, N*N*M))
    xn = np.arange(0,N)
    Xn, Yn = np.meshgrid(xn,xn, sparse=False)
    xm = np.arange(0,M)
    Xm, Ym = np.meshgrid(xm,xm, sparse=False)

    dctBasisN = np.cos((np.pi / N) * (Yn + 0.5)*Xn)
    dctBasisN = np.mat(dctBasisN)
    dctBasisM = np.cos((np.pi / M) * (Ym + 0.5)*Xm)
    dctBasisM = np.mat(dctBasisM)

    for i in range(0,N):
        for j in range(0,N):
            filt2d = dctBasisN[:,j].dot(dctBasisN[:,i].T)
            filt2d = filt2d.reshape(N**2,1)
            for k in range(0,M):
                filt = filt2d.dot(dctBasisM[:,k].T)
                filt = filt/np.linalg.norm(filt)  # L2 normalization
                filtMtx[:,j*N+k*N*N + i] = filt.reshape(N*N*M)
    return filtMtx.astype("float32")[:,1:]

#load parameters theta from file
Project: intelligentCampus    Author: Jackal007    | Project source | File source
def createValidateDataSet():
    '''
    get the validation data
    '''
    db = MyDataBase.MyDataBase("validate")
    conn, executer = db.getConn(), db.getExcuter()
    # get all the students
    executer.execute("select * from students_rank")
    students,dataSet = [],[]
    for i in executer.fetchall():
        student = Student(studentId=i[0], attributes=list(i[1:-1]), subsidy=i[-1])
        dataSet.append(student.getAll())
        students.append(student)
    conn.close();executer.close()
    dataSet = mat(dataSet)
    return students,dataSet[:, :-1]
Project: car-detection    Author: mmetcalfe    | Project source | File source
def unprojectOpenGL(self, u):
        # K, R, t = camera.factor()

        # squareProj = np.row_stack((
        #     camera.P,
        #     np.array([0,0,0,1], np.float32)
        # ))
        # invProj = np.linalg.inv(squareProj)
        # x = invProj*np.row_stack([np.mat(u).T, [1]])
        # x = x[:3]

        # u = np.mat(u).T
        # x = np.linalg.inv(R)*(np.linalg.inv(K)*u - t)

        proj = self.getOpenGlCameraMatrix()
        invProj = np.linalg.inv(proj)
        x = invProj*np.row_stack([np.mat(u).T, [1]])
        x = x[:3] / x[3]
        return x
Project: car-detection    Author: mmetcalfe    | Project source | File source
def unproject(self, u):
        # K, R, t = camera.factor()

        # squareProj = np.row_stack((
        #     camera.P,
        #     np.array([0,0,0,1], np.float32)
        # ))
        # invProj = np.linalg.inv(squareProj)
        # x = invProj*np.row_stack([np.mat(u).T, [1]])
        # x = x[:3]

        # u = np.mat(u).T
        # x = np.linalg.inv(R)*(np.linalg.inv(K)*u - t)

        proj = self.getOpenGlCameraMatrix()
        invProj = np.linalg.inv(proj)
        x = invProj*np.row_stack([np.mat(u).T, [1]])
        x = x[:3] / x[3]
        return x

    # TODO: Fix handling of camera centre.
Project: imagepy    Author: Image-Py    | Project source | File source
def filter(self, kpt1, feat1, kpt2, feat2):
        kpt1 = np.array([(k.pt[0],k.pt[1]) for k in kpt1])
        kpt2 = np.array([(k.pt[0],k.pt[1]) for k in kpt2])
        self.normalrize(kpt1), self.normalrize(kpt2)
        idx = self.match(feat1, feat2)
        if self.dim == 0: 
            return idx, np.ones(len(idx), dtype=np.bool), 1
        mask = []
        for i1, i2 in idx:
            v1 = np.mat(kpt1[i1])
            v2 = np.mat(kpt2[i2])
            if self.test(v1, v2):
                self.accept(v1.T,v2.T)
                mask.append(True)
            else: mask.append(False)
        mask = np.array(mask)
        #print mask
        return idx, mask, self.V
Project: Python    Author: TheAlgorithms    | Project source | File source
def __init__(self,conv1_get,size_p1,bp_num1,bp_num2,bp_num3,rate_w=0.2,rate_t=0.2):
        '''
        :param conv1_get: [a, c, d]: size, number, step of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of flatten layer
        :param bp_num2: units number of hidden layer
        :param bp_num3: units number of output layer
        :param rate_w: rate of weight learning
        :param rate_t: rate of threshold learning
        '''
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [np.mat(-1*np.random.rand(self.conv1[0],self.conv1[0])+0.5) for i in range(self.conv1[1])]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1*np.random.rand(self.num_bp2, self.num_bp1)+0.5)
        self.thre_conv1 = -2*np.random.rand(self.conv1[1])+1
        self.thre_bp2 = -2*np.random.rand(self.num_bp2)+1
        self.thre_bp3 = -2*np.random.rand(self.num_bp3)+1
Project: NGImageProcessor    Author: artzers    | Project source | File source
def LaplaceFFTDemo(self):
        origfftimg = self.PrepareFFT()
        fftimg = origfftimg.copy()
        sz = fftimg.shape
        center = np.mat(fftimg.shape) / 2.0
        for i in xrange(0, 512):
            for j in xrange(0, 512):
                #pass
                #print -(np.float64(i - center[0, 0]) ** 2.0 + np.float64(j - center[0, 1]) ** 2.0)
                fftimg[i, j] *= - 0.00001* (np.float64(i - 256) ** 2.0 + np.float64(j - 256) ** 2.0)
        ifft = self.GetIFFT(fftimg)
        #plt.imshow(np.real(fftimg))
        #plt.show()
        # cv2.namedWindow("fft1")
        # cv2.imshow("fft1", np.real(origfftimg))
        cv2.namedWindow("fft")
        cv2.imshow("fft", np.real(fftimg))
        # cv2.imshow("ifft", np.uint8(ifft))
        cv2.namedWindow("ifft")
        cv2.imshow("ifft", ifft)
        cv2.waitKey(0)
Project: academic    Author: xinchrome    | Project source | File source
def calculate_objective(self,spontaneous,w1,alpha,w2,events,train_times):
        T=train_times
        N=len(events)
        s=events
        old_sum2 = 0
        obj = numpy.log(spontaneous*numpy.exp(-w1*s[0]))
        for i in range(1,N):
            mu = spontaneous*numpy.exp(-w1*s[i])
            sum1 = mu
            sum2 = (old_sum2 + alpha)*numpy.exp(-w2*(s[i]-s[i-1]))
            old_sum2 = sum2
            obj=obj+numpy.log(sum1+sum2)
        activate = numpy.exp(-w2*(T-numpy.mat(s)))
        activate_sum = numpy.sum((1-activate))*alpha/float(w2)
        obj= obj - activate_sum 
        obj = obj - (spontaneous/w1) * (1 - numpy.exp(-w1*T))
        return obj
Project: academic    Author: xinchrome    | Project source | File source
def predict_one(self,_id,duration,pred_year):
        try:
            patent = self.params['patent'][str(_id)]
        except KeyError,e:
            return None
        w1 = self.params['w1']
        alpha = patent['alpha']
        w2 = self.params['w2']
        fea = numpy.mat(patent['fea'])
        ti = patent['cite']
        beta = numpy.mat(self.params['beta'])

        cut_point = pred_year - int(float((patent['year'])))
        tr = numpy.mat([x for x in ti if x <= cut_point])
        pred = self.predict_year_by_year(tr,cut_point,duration,
            beta*numpy.mat(fea).T,w1,alpha,w2)

        _dict = {}
        for i in range(len(pred)):
            year = pred_year + i + 1
            _dict[year] = pred[i]
        _list = sorted(_dict.items(),key=lambda x:x[0])
        return _list
Project: academic    Author: xinchrome    | Project source | File source
def predict_year_by_year(self,tr,cut_point,duration,spontaneous,w1,alpha,w2):
        N = tr.shape[1] 
        pred = []
        for t in range(cut_point+1,cut_point+duration+1):
            delta_ct = spontaneous/w1*(numpy.exp(-w1*(t-1))-numpy.exp(-w1*t)) + \
                alpha/w2*(numpy.sum(numpy.exp(-w2*((t-1)-tr)))-numpy.sum(numpy.exp(-w2*(t-tr))))
            delta_ct = delta_ct[0,0]
            if len(pred) == 0:
                ct = N + delta_ct
            else :
                ct = pred[-1] + delta_ct
            tr = tr.tolist()[0]
            tr.extend([t for i in range(int(delta_ct))])
            tr = numpy.mat(tr)
            pred.append(ct)
        return pred
Project: academic    Author: xinchrome    | Project source | File source
def update_sequence_weights(self,pids,Alpha,features,sequences,publish_years,predict_year,beta,W1,W2):

        result = []
        for i in range(len(pids)):
            seq = {}
            fea = numpy.mat(features[i])
            beta = numpy.mat(beta)
            seq['seq_id'] = i
            seq['paper_id'] = pids[i]
            seq['theta'] = W1[i]
            seq['w'] = W2[i]
            seq['alpha'] = Alpha[i]
            seq['fea'] = features[i]
            seq['beta'] = beta.tolist()
            seq['spont'] = (fea*beta).tolist()[0]
            result.append(seq)

        self.sequence_weights = result
Project: academic    Author: xinchrome    | Project source | File source
def simhawkes(trseq,T1,T2,v,w1,alpha,w2):
    t = T1
    while t<T2:
        lam = v*numpy.exp(-w1*t) + numpy.sum(alpha*numpy.exp(-w2*(t-trseq)))
        #v*exp(-w1*t) + sum(alpha*exp(-w2*(t-trseq)));
        u = numpy.random.random() # rand();
        t = t+(-numpy.log(numpy.random.random())/float(lam)) #t+(-log(rand)/lam);
        lam2 = v*numpy.exp(-w1*t) + numpy.sum(alpha*numpy.exp(-w2*(t-trseq))) 
        #v*exp(-w1*t) + sum(alpha*exp(-w2*(t-trseq)));
        if t<T2 and u*lam<lam2:
             trseq = numpy.concatenate((trseq,numpy.mat(t)),axis=1) #[trseq;t];
        #end
        if trseq.shape[1] > 1e3: #length(trseq)>1e3
            break
        #end
    #end
    return trseq
    #ct = trseq

    #end
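
A minimal way to exercise simhawkes above (a sketch; the seed event and all parameter values below are arbitrary choices for illustration): seed it with one event at t = 0 and simulate over [1, 50].

import numpy

history = numpy.mat([0.0])                        # one seed event at t = 0 (assumed)
sim = simhawkes(history, 1.0, 50.0, 2.0, 0.01, 0.5, 1.0)
print(sim.shape)                                  # (1, k): each accepted event is appended as a new column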
Project: academic    Author: xinchrome    | Project source | File source
def cal_obj(v,w1,alpha,w2,events,trainT): #function [obj]=cal_obj(v,w1,alpha,w2,events,trainT)
    T=trainT
    N=len(events)
    s=events
    old_sum2 = 0
    obj = numpy.log(v*numpy.exp(-w1*s[0])) #log(v*exp(-w1*s(1)));
    for i in range(1,N): #i=2:N
        mu = v*numpy.exp(-w1*s[i]) #v*exp(-w1*s(i));
        sum1 = mu
        sum2 = (old_sum2 + alpha)*numpy.exp(-w2*(s[i]-s[i-1])) #(old_sum2+ alpha)*exp(-w2*(s(i)-s(i-1)));
        old_sum2 = sum2
        obj=obj+numpy.log(sum1+sum2)
    #end
    ____1 = numpy.exp(-w2*(T-numpy.mat(s)))
    ____2 = numpy.sum((1-    ____1))*alpha/float(w2)
    obj= obj - ____2 #obj - sum((1-exp(-w2*(T-s))))*alpha/w2;
    obj = obj - (v/w1) * (1 - numpy.exp(-w1*T)) #obj - v/w1*(1-exp(-w1*T));
    return obj
#end
Project: apicultor    Author: sonidosmutantes    | Project source | File source
def rbf_kernel(self, x, y, gamma):
    """
    Custom sigmoid kernel function, similarities of vectors using a radial basis function kernel
    :param x: array of input vectors
    :param y: array of input vectors
    :param gamma: reach factor
    :returns:
      - rbfk: radial basis of the kernel's inner product
    """     
        mat1 = np.mat(x) #convert to readable matrices
        mat2 = np.mat(y)                                                                                                
        trnorms1 = np.mat([(v * v.T)[0, 0] for v in mat1]).T #norm matrices
        trnorms2 = np.mat([(v * v.T)[0, 0] for v in mat2]).T                                                                                
        k1 = trnorms1 * np.mat(np.ones((mat2.shape[0], 1), dtype=np.float64)).T #dot products of y and y transposed and x and x transposed   
        k2 = np.mat(np.ones((mat1.shape[0], 1), dtype=np.float64)) * trnorms2.T          

        rbfk = k1 + k2 #sum products together
        rbfk -= 2 * np.mat(mat1 * mat2.T) #dot product of x and y transposed                                         
        rbfk *= - 1./(2 * np.power(gamma, 2)) #radial basis
        np.exp(rbfk,rbfk)
        return np.array(rbfk)
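
The quantity assembled above is the squared Euclidean distance, so the method computes the Gaussian kernel exp(-||x - y||^2 / (2*gamma^2)). A standalone sketch checking that identity on two random vectors:

import numpy as np

x = np.random.rand(1, 4)
y = np.random.rand(1, 4)
gamma = 0.8

mat1, mat2 = np.mat(x), np.mat(y)
sq_dist = (mat1 * mat1.T)[0, 0] + (mat2 * mat2.T)[0, 0] - 2 * (mat1 * mat2.T)[0, 0]
k_fast = np.exp(-sq_dist / (2 * gamma ** 2))
k_direct = np.exp(-np.linalg.norm(x - y) ** 2 / (2 * gamma ** 2))
print(np.allclose(k_fast, k_direct))            # True: same Gaussian kernel value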
Project: mieSys    Author: breadada    | Project source | File source
def load_ad_info(dict_ad_info, user_behavior):
    list_ad_info = []
    for ad in dict_ad_info:
        ad_id = dict_ad_info[ad][0]
        position = 1
        advertiser_id = int(dict_ad_info[ad][1])
        price = int(dict_ad_info[ad][5])
        ad_tag = dict_ad_info[ad][4]
        user_tag = user_behavior[0][0]
        user_sex = user_behavior[0][1]
        list_ad_info.append([-1, ad_id, position, advertiser_id, price, ad_tag, user_tag, user_sex])
        # print list_ad_info
    list_ad_info.append([-1, ad_id, 2, advertiser_id, price, ad_tag, user_tag, user_sex])
    name = ['click', 'ad_id', 'position', 'advertiser_id', 'price', 'ad_tag', 'user_tag', 'user_sex']
    # np_ad_info = np.mat(list_ad_info)
    df_ad_info = pd.DataFrame(list_ad_info, columns=name)
    file = open("f_origin_8features.pkl", 'wb')
    pickle.dump(df_ad_info, file)
    file.close()

    #print 'DONE'
    return df_ad_info
Project: HIT_ML_2017    Author: Red-Night-Aria    | Project source | File source
def Back_Propagation():
    global In_param
    global Out_param
    global thres_in
    global thres_out

    rate = 0.1

    for epoch in range(50000):
        for id, item in enumerate(traits):

            hid_In    = np.array(np.mat(item) * np.mat(In_param))
            hid_Out   = sigmoid(hid_In - thres_in)

            fin_In    = np.array(np.mat(hid_Out) * np.mat(Out_param))
            fin_Out   = sigmoid(fin_In - thres_out)

            g         = fin_Out * (1.0 - fin_Out) * (judge[id] - fin_Out)
            e         = hid_Out * (1.0 - hid_Out) * np.array([np.dot(x, g) for x in Out_param])

            In_param  += np.array(rate * np.matrix(item).T * np.matrix(e))
            Out_param += np.array(rate * np.matrix(hid_Out).T * np.matrix(g))
            thres_in  -= rate * e
            thres_out -= rate * g
Project: MachineLearningWithPython    Author: asonee    | Project source | File source
def PCA(dataset, topFeatNum = 2):
    # Steps:
    # 1. subtract the mean of each feature
    # 2. divide each feature by its standard deviation
    # 3. compute the covariance matrix
    # 4. compute its eigenvalues and eigenvectors
    # 5. project the data onto the top eigenvectors
    datasetMat = np.mat(dataset)
    meanValues = np.mean(datasetMat, axis = 0)
    stds = np.std(datasetMat, axis = 0)
    adjustedDatasetMat = datasetMat - meanValues
    adjustedDatasetMat = adjustedDatasetMat / stds
    plt.plot(adjustedDatasetMat[:, 0], adjustedDatasetMat[:, 1], "r^")
    plt.show()
    covMat = np.cov(adjustedDatasetMat, rowvar = 0)
    #covMat = (adjustedDatasetMat.T * adjustedDatasetMat) / datasetMat.shape[0]  # manual equivalent; rowvar=0 above treats each column as a variable
    eigenVals, eigenVecs = np.linalg.eig(np.mat(covMat))
    draw(eigenVals) # plot the eigenvalues to help decide how many components to keep
    eigenValsIndex = np.argsort(eigenVals) # indices that sort eigenVals in ascending order
    eigenValsIndex = eigenValsIndex[: -(topFeatNum+1) : -1] # indices of the topFeatNum largest eigenvalues
    eigenVecs = eigenVecs[:, eigenValsIndex] # keep the eigenvectors of the topFeatNum largest eigenvalues
    transformedDatasetMat = adjustedDatasetMat * eigenVecs

    return transformedDatasetMat
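
A compact, plot-free sketch of the same projection steps on random data (standalone; the draw() helper used above is not shown in this snippet, so this sketch just prints the projected shape):

import numpy as np

data = np.random.randn(200, 3)
data[:, 2] = 2 * data[:, 0] + 0.5 * np.random.randn(200)    # make one feature partly redundant

X = np.mat(data)
X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)            # standardize
eigVals, eigVecs = np.linalg.eig(np.mat(np.cov(X, rowvar=0)))
top = np.argsort(eigVals)[:-3:-1]                           # indices of the 2 largest eigenvalues
projected = X * eigVecs[:, top]
print(projected.shape)                                      # (200, 2)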
Project: RNNVis    Author: myaooo    | Project source | File source
def mds(d, dimensions=2):
    """
    Multidimensional Scaling - Given a matrix of interpoint distances,
    find a set of low dimensional points that have similar interpoint
    distances.
    """

    E = (-0.5 * d**2)

    # Use mat to get column and row means to act as column and row means.
    Er = np.mat(np.mean(E, 1))
    Es = np.mat(np.mean(E, 0))

    # From Principles of Multivariate Analysis: A User's Perspective (page 107).
    F = np.array(E - np.transpose(Er) - Es + np.mean(E))

    U, S, V = svd(F)

    Y = U * np.sqrt(S)

    return Y[:, 0:dimensions], S
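
A quick standalone check of mds (it assumes svd is already in scope, e.g. from numpy.linalg): embedding the distance matrix of a planar point set should reproduce the interpoint distances.

import numpy as np
from numpy.linalg import svd   # mds() above expects svd to be available

pts = np.random.rand(6, 2)                                        # six points in the plane
D = np.sqrt(((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1))   # pairwise distances
Y, S = mds(D, dimensions=2)
Y = np.asarray(Y)
D_rec = np.sqrt(((Y[:, None, :] - Y[None, :, :]) ** 2).sum(-1))
print(np.allclose(D, D_rec))                                      # True: distances are preserved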
Project: South-African-Heart-Disease-data-analysis-using-python    Author: khushi4tiwari    | Project source | File source
def plotPrincipalComponents(principal1, principal2, X, y, classNames):
    C = len(classNames)    
    Y = X - np.ones((len(X),1))*X.mean(0)

    U,S,V = linalg.svd(Y,full_matrices=False)
    V = mat(V).T

    Z = Y * V

    # Plot PCA of the data
    f = figure()
    f.hold()
    title('Data projected onto Principal Components')
    for c in range(C):
        class_mask = y.A.ravel()==c
        plot(Z[class_mask,principal1], Z[class_mask,principal2], 'o')
    legend([convertToWord(i) for i in classNames])
    xlabel('PC{0}'.format(principal1+1))
    ylabel('PC{0}'.format(principal2+1))
    show()

# Gets the direction of a certain principal component
Project: South-African-Heart-Disease-data-analysis-using-python    Author: khushi4tiwari    | Project source | File source
def plot3DPrincipalComponents(X,y,classNames,prin1,prin2,prin3,attributeNames):
    C = len(classNames)    
    Y = X - np.ones((len(X),1))*X.mean(0)

    U,S,V = linalg.svd(Y,full_matrices=False)
    V = mat(V).T

    Z = Y * V

    f = figure()
    hold(True)
    colors = ['blue', 'green']
    ax = f.add_subplot(111, projection='3d')
    for c in range(C):
        class_mask = (y==c).A.ravel()
        ax.scatter(Z[class_mask,prin1].A, Z[class_mask,prin2].A, Z[class_mask,prin3].A, c=colors[c])    
    ax.set_xlabel('PC{0}'.format(prin1+1))
    ax.set_ylabel('PC{0}'.format(prin2+1))
    ax.set_zlabel('PC{0}'.format(prin3+1))
    title("3D plot of principal components")
    legend(attributeNames)


#Using CHD as attribute
Project: Machine-Learning-Projects    Author: poke19962008    | Project source | File source
def learn(fName, features, nRows=-1):
    with open('bin/train.bin', 'r') as f:
        train = np.load(f)

        x = np.mat(train[:nRows,timbreVector[features[0]]]).reshape(nRows,1)
        y = np.mat(train[:nRows,timbreVector[features[1]]]).reshape(nRows,1)
        z = np.mat(train[:nRows,timbreVector[features[2]]]).reshape(nRows,1)

        X = np.concatenate((x, y, z), axis=1)
        Y = train[:nRows,0] % minYear

        clf = svm.SVC(verbose=3)
        clf.fit(X, Y)
        print "[SUCCESS] Fitted training data to SVM (kernel: rbf)."

        print "[STARTED] Dumping classifier."
        joblib.dump(clf, 'bin/%s'%fName)
        print "[SUCCESS] Dumped to ", fName
Project: Machine-Learning-Projects    Author: poke19962008    | Project source | File source
def test(fName, features, nRows):
    with open('bin/train.bin') as f:
        test = np.load(f)

        x = np.mat(test[:nRows,timbreVector[features[0]]]).reshape(nRows,1)
        y = np.mat(test[:nRows,timbreVector[features[1]]]).reshape(nRows,1)
        z = np.mat(test[:nRows,timbreVector[features[2]]]).reshape(nRows,1)

        X = np.concatenate((x, y, z), axis=1)
        Y = test[:nRows,0]
        pred = predict(fName, X)

        print "Mean Square Error: ", np.mean(0.5*np.square(pred - Y))
        print "Absolute Error: ", np.mean(np.absolute(pred-Y))

        plt.scatter(Y, pred-Y, marker='o')
        plt.xlabel('Actual')
        plt.ylabel('Difference')
        plt.show()
Project: Machine-Learning-Projects    Author: poke19962008    | Project source | File source
def learn(X, Y, datapoint):
    global alpha

    datapoint = np.mat(datapoint)
    Y = np.mat(Y)
    X = np.mat(X)

    weights = getWeights(X, datapoint)

    den = (X*weights)*X.T
    num = (X*weights)*Y.T

    try:
        return num*den.I
    except:
        return None
Project: flight-data-processor    Author: junzis    | Project source | File source
def filter(self, X, Y):
        if self.interpolate:
            X, Y = self.simplefill(X, Y)
        else:
            X, Y = self.sortxy(X, Y)

        order_range = list(range(self.order+1))
        half_window = (self.window_size - 1) // 2
        # precompute coefficients
        b = np.mat([[k**i for i in order_range]
                    for k in range(-half_window, half_window+1)])
        m = np.linalg.pinv(b).A[self.deriv]
        # pad the signal at the extremes with
        # values taken from the signal itself
        firstvals = Y[0] - np.abs(Y[1:half_window+1][::-1] - Y[0])
        lastvals = Y[-1] + np.abs(Y[-half_window-1:-1][::-1] - Y[-1])
        Y1 = np.concatenate((firstvals, Y, lastvals))
        Y2 = np.convolve(m, Y1, mode='valid')

        return X, Y2
Project: jamespy_py3    Author: jskDr    | Project source | File source
def gs_numpy( method, X, Y, alphas_log = (-1, 1, 9), n_splits=5, n_jobs = -1, disp = True):
    """
    Grid search method with numpy arrays X and Y.
    Previously, np.mat was used for compatibility with Matlab notation.
    """
    if disp:
        print( X.shape, Y.shape)

    clf = getattr( linear_model, method)()
    parmas = {'alpha': np.logspace( *alphas_log)}
    kf5_c = model_selection.KFold( n_splits = n_splits, shuffle=True)
    #kf5 = kf5_c.split( X)
    gs = model_selection.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf5_c, n_jobs = n_jobs)

    gs.fit( X, Y)

    return gs
Project: jamespy_py3    Author: jskDr    | Project source | File source
def mlr_show( clf, RMv, yEv, disp = True, graph = True):
    yEv_calc = clf.predict( RMv)

    if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
        yEv_calc = np.mat( yEv_calc).T

    r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
    if graph:
        plt.figure()
        ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
        plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz)
        ax = plt.gca()
        lims = [
            np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
            np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
        ]
        # now plot both limits against eachother
        #ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
        ax.plot(lims, lims, '-', color = 'pink')
        plt.xlabel('Experiment')
        plt.ylabel('Prediction')
        plt.title( '$r^2$ = {0:.2e}, RMSE = {1:.2e}'.format( r_sqr, RMSE))
        plt.show()

    return r_sqr, RMSE
Project: jamespy_py3    Author: jskDr    | Project source | File source
def mlr_show3( clf, RMv, yEv, disp = True, graph = True):
    yEv_calc = clf.predict( RMv)

    if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
        yEv_calc = np.mat( yEv_calc).T

    r_sqr, RMSE, aae = jchem.estimate_accuracy3( yEv, yEv_calc, disp = disp)
    if graph:
        plt.figure()
        ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
        plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz)
        ax = plt.gca()
        lims = [
            np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
            np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
        ]
        # now plot both limits against eachother
        #ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
        ax.plot(lims, lims, '-', color = 'pink')
        plt.xlabel('Experiment')
        plt.ylabel('Prediction')
        plt.title( '$r^2$={0:.2e}, RMSE={1:.2e}, AAE={2:.2e}'.format( r_sqr, RMSE, aae))
        plt.show()

    return r_sqr, RMSE, aae
Project: jamespy_py3    Author: jskDr    | Project source | File source
def ann_val_post( yE, disp = True, graph = True, rate = 2, more_train = True, center = None):
    """
    After ann_pre and shell command, ann_post can be used.
    """
    df_ann = pd.read_csv( 'ann_out.csv')
    yE_c = np.mat( df_ann['out'].tolist()).T

    yEt, yEt_c, yEv, yEv_c = jchem.get_valid_mode_data( yE, yE_c, rate = rate, more_train = more_train, center = center)

    print('Trainig result')
    ann_show( yEt, yEt_c, disp = disp, graph = graph)

    print('Validation result')
    r_sqr, RMSE = ann_show( yEv, yEv_c, disp = disp, graph = graph)

    return r_sqr, RMSE