Python numpy module: transpose() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.transpose().
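
Before diving into the project examples, here is a minimal sketch of the basic call (illustrative shapes only): with no axes argument, numpy.transpose() reverses the axis order; with an explicit axes tuple it permutes the dimensions, which is how several of the examples below convert images from HWC to CHW layout.

import numpy as np

a = np.arange(6).reshape(2, 3)      # shape (2, 3)
print(np.transpose(a).shape)        # (3, 2) -- axes reversed by default

img = np.zeros((4, 5, 3))           # a small HWC "image"
chw = np.transpose(img, (2, 0, 1))  # explicit axes: HWC -> CHW
print(chw.shape)                    # (3, 4, 5)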

Project: FCN_train    Author: 315386775    | project source | file source
def preprocess(image):
    """Takes an image and apply preprocess"""
    # ????????????
    image = cv2.resize(image, (data_shape, data_shape))
    # ?? BGR ? RGB
    image = image[:, :, (2, 1, 0)]
    # ?mean?????float
    image = image.astype(np.float32)
    # ? mean
    image -= np.array([123, 117, 104])
    # ??? [batch-channel-height-width]
    image = np.transpose(image, (2, 0, 1))
    image = image[np.newaxis, :]
    # ?? ndarray
    image = nd.array(image)
    return image
Project: MachineLearningProjects    Author: geallen    | project source | file source
def backPropagate(Z1, Z2, y, W2, b2):
    ## YOUR CODE HERE ##
    E2 = 0
    E1 = 0
    Eb1 = 0

    # E2 is the error in the output layer. To find it we subtract the actual output from the estimated output.
    # We get 5 errors because there are 5 nodes in the output layer.
    E2 = Z2 - y

    ## E1 is the error in the hidden layer. To find it we use the error found in the output layer and the weights
    ## between the output and hidden layers.
    ## We get 30 errors because there are 30 nodes in the hidden layer.
    E1 = np.dot(W2, np.transpose(E2))

    ## Eb1 is the bias error for the hidden layer. To find it we use the error found in the output layer and the
    ## weights between the output layer and the bias node.
    ## We get 1 error because there is 1 bias node in the hidden layer.
    Eb1 = np.dot(b2, np.transpose(E2))
    ####################
    return E2, E1, Eb1

# calculate the gradients for weights between units and the bias weights
Project: pytorch-semseg    Author: meetshah1995    | project source | file source
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl[lbl==255] = 0
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
Project: pytorch-semseg    Author: meetshah1995    | project source | file source
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl = self.encode_segmap(lbl)
        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)
        assert(np.all(classes == np.unique(lbl)))

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
Project: MachineLearningProjects    Author: geallen    | project source | file source
def calcGrads(X, Z1, Z2, E1, E2, Eb1):
    ## YOUR CODE HERE ##
    d_W1 = 0
    d_b1 = 0
    d_W2 = 0
    d_b2 = 0


    ## Here we compute the derivatives for the gradients; each gradient is the product of an error term and the
    ## corresponding activation.

    # d_W2 is the gradient for the weights between the hidden layer and the output layer.
    d_W2 = np.dot(np.transpose(E2), Z1)
    # d_W1 is the gradient for the weights between the hidden layer and the input layer.
    d_W1 = np.dot(E1, X)
    # d_b2 is the gradient for the weights between the hidden-layer bias and the output layer.
    d_b2 = np.dot(np.transpose(E2), Eb1)
    # d_b1 is the gradient for the weights between the hidden-layer bias and the input layer.
    d_b1 = np.dot(np.transpose(E1), 1)


    ####################
    return d_W1, d_W2, d_b1, d_b2

# update the weights between units and the bias weights using a learning rate of alpha
Project: MachineLearningProjects    Author: geallen    | project source | file source
def updateWeights(W1, b1, W2, b2, alpha, d_W1, d_W2, d_b1, d_b2):
    ## YOUR CODE HERE ##
    # W1 = 0
    # b1 = 0
    # W2 = 0
    # b2 = 0

    ## Here we update the weights using the gradients computed in the calcGrads function

    ## W1 is weights between input and the hidden layer
    W1 = W1 - alpha * (np.transpose(d_W1)) # 400*30
    ## W2 is weights between output and the hidden layer
    W2 = W2 - alpha * (np.transpose(d_W2)) # 30*5
    ## b1 is weights between input bias and the hidden layer
    b1 = b1 - alpha * d_b1
    ## b2 is weights between hidden layer bias and the output layer
    b2 = b2 - alpha * (np.transpose(d_b2))
    ####################
    return W1, b1, W2, b2
Project: convolutional-pose-machines-tensorflow    Author: timctho    | project source | file source
def make_heatmaps_from_joints(input_size, heatmap_size, gaussian_variance, batch_joints):
    # Generate ground-truth heatmaps from ground-truth 2d joints
    scale_factor = input_size // heatmap_size
    batch_gt_heatmap_np = []
    for i in range(batch_joints.shape[0]):
        gt_heatmap_np = []
        invert_heatmap_np = np.ones(shape=(heatmap_size, heatmap_size))
        for j in range(batch_joints.shape[1]):
            cur_joint_heatmap = make_gaussian(heatmap_size,
                                              gaussian_variance,
                                              center=(batch_joints[i][j] // scale_factor))
            gt_heatmap_np.append(cur_joint_heatmap)
            invert_heatmap_np -= cur_joint_heatmap
        gt_heatmap_np.append(invert_heatmap_np)
        batch_gt_heatmap_np.append(gt_heatmap_np)
    batch_gt_heatmap_np = np.asarray(batch_gt_heatmap_np)
    batch_gt_heatmap_np = np.transpose(batch_gt_heatmap_np, (0, 2, 3, 1))

    return batch_gt_heatmap_np
Project: evaluation_tools    Author: JSALT-Rosetta    | project source | file source
def af_h5_to_np(input_path, outpath):

    files = tables.open_file(input_path, mode = 'r+')
    speaker_nodes = files.root._f_list_nodes()

    for spk in speaker_nodes:
        file_nodes = spk._f_list_nodes()
        for fls in file_nodes:
            file_name = fls._v_name
            af_nodes = fls._f_list_nodes()
            af_list = []
            for fts in af_nodes:
                features = fts[:]
                mean = numpy.mean(features,1)
                normalised_feats = list(numpy.transpose(features)/mean)
                af_list += normalised_feats
            numpy.save(outpath + file_name, numpy.array(af_list))
Project: kernel_goodness_of_fit    Author: karlnapf    | project source | file source
def mahalanobis_distance(difference, num_random_features):
    num_samples, _ = np.shape(difference)
    sigma = np.cov(np.transpose(difference))

    mu = np.mean(difference, 0)

    if num_random_features == 1:
        stat = float(num_samples * mu ** 2) / float(sigma)
    else:
        try:
            linalg.inv(sigma)
        except LinAlgError:
            print('covariance matrix is singular. Pvalue returned is 1.1')
            warnings.warn('covariance matrix is singular. Pvalue returned is 1.1')
            return 0
        stat = num_samples * mu.dot(linalg.solve(sigma, np.transpose(mu)))

    return chi2.sf(stat, num_random_features)
Project: seqhawkes    Author: mlukasik    | project source | file source
def sumIntensitiesMeme(
        self,
        t,
        m,
        node_vec,
        etimes,
        filterlatertimes=True,
        ):
        if filterlatertimes:
            I = self.mu * self.gamma[m] \
                + np.dot(np.transpose(self.alpha[node_vec[etimes
                         < t].astype(int), :][:, range(self.D)]),
                         self.kernel_evaluate(t, etimes[etimes < t],
                         self.omega))
        else:
            I = self.mu * self.gamma[m] \
                + np.dot(np.transpose(self.alpha[node_vec.astype(int), :
                         ][:, range(self.D)]), self.kernel_evaluate(t,
                         etimes, self.omega))
        sumI = np.sum(I)
        return (I, sumI)
Project: seqhawkes    Author: mlukasik    | project source | file source
def sumIntensitiesAll(
        self,
        t,
        node_vec,
        etimes,
        filterlatertimes=False,
        ):
        if filterlatertimes:
            I = self.mu * np.sum(self.gamma) \
                + np.dot(np.transpose(self.alpha[node_vec[etimes
                         < t].astype(int), :][:, range(self.D)]),
                         self.kernel_evaluate(t, etimes[etimes < t],
                         self.omega))
        else:
            I = self.mu * np.sum(self.gamma) \
                + np.dot(np.transpose(self.alpha[node_vec.astype(int), :
                         ][:, range(self.D)]), self.kernel_evaluate(t,
                         etimes, self.omega))
        sumI = np.sum(I)
        return (I, sumI)
Project: seqhawkes    Author: mlukasik    | project source | file source
def _intensityUserMeme(
        self,
        t,
        d,
        m,
        filterlatertimes=False,
        ):
        etimes = self.etimes[self.eventmemes == m]
        node_vec = self.node_vec[self.eventmemes == m]
        if filterlatertimes:
            return self.mu[d] * self.gamma[m] \
                + np.dot(np.transpose(self.alpha[node_vec[etimes
                         < t].astype(int), :][:, d]),
                         self.kernel_evaluate(t, etimes[etimes < t],
                         self.omega))
        else:
            return self.mu[d] * self.gamma[m] \
                + np.dot(np.transpose(self.alpha[node_vec.astype(int), :
                         ][:, d]), self.kernel_evaluate(t, etimes,
                         self.omega))
Project: EmotiW-2017-Audio-video-Emotion-Recognition    Author: xujinchang    | project source | file source
def load_X(X_signals_paths):
    """
    Given attribute (train or test) of feature, read all 9 features into an
    np ndarray of shape [sample_sequence_idx, time_step, feature_num]
        argument:   X_signals_paths str attribute of feature: 'train' or 'test'
        return:     np ndarray, tensor of features
    """
    X_signals = []

    for signal_type_path in X_signals_paths:
        file = open(signal_type_path, 'rb')
        # Read dataset from disk, dealing with text files' syntax
        X_signals.append(
            [np.array(serie, dtype=np.float32) for serie in [
                row.replace('  ', ' ').strip().split(' ') for row in file
            ]]
        )
        file.close()

    return np.transpose(np.array(X_signals), (1, 2, 0))
Project: speed    Author: keon    | project source | file source
def get_batcher(self, shuffle=True, augment=True):
        """ produces batch generator """
        w, h = self.resize

        if shuffle: np.random.shuffle(self.data)
        data = iter(self.data)
        while True:
            x = np.zeros((self.batch_size, self.timesteps, h, w, 3))
            y = np.zeros((self.batch_size, 1))
            for b in range(self.batch_size):
                images, label = next(data)
                for t, img_name in enumerate(images):
                    image_path = self.folder + 'images/' + img_name
                    img = cv2.imread(image_path)
                    img = img[190:350, 100:520] # crop
                    if augment:
                        img = aug.augment_image(img) # augmentation
                    img = cv2.resize(img.copy(), (w, h))
                    x[b, t] = img
                y[b] = label
            x = np.transpose(x, [0, 4, 1, 2, 3])
            yield x, y
Project: mpnum    Author: dseuss    | project source | file source
def _random_op(sites, ldim, hermitian=False, normalized=False, randstate=None,
               dtype=np.complex_):
    """Returns a random operator  of shape (ldim,ldim) * sites with local
    dimension `ldim` living on `sites` sites in global form.

    :param sites: Number of local sites
    :param ldim: Local dimension
    :param hermitian: Return only the hermitian part (default False)
    :param normalized: Normalize to Frobenius norm=1 (default False)
    :param randstate: numpy.random.RandomState instance or None
    :returns: numpy.ndarray of shape (ldim,ldim) * sites

    >>> A = _random_op(3, 2); A.shape
    (2, 2, 2, 2, 2, 2)
    """
    op = _randfuncs[dtype]((ldim**sites,) * 2, randstate=randstate)
    if hermitian:
        op += np.transpose(op).conj()
    if normalized:
        op /= np.linalg.norm(op)
    return op.reshape((ldim,) * 2 * sites)
Project: mpnum    Author: dseuss    | project source | file source
def transpose(self, axes=None):
        """Transpose (=reverse order of) physical legs on each site

        :param axes: New order of the physical axes. If ``None`` is passed,
            we reverse the order of the legs on each site. (default ``None``)

        >>> from .factory import random_mpa
        >>> mpa = random_mpa(2, (2, 3, 4), 2)
        >>> mpa.shape
        ((2, 3, 4), (2, 3, 4))
        >>> mpa.transpose((2, 0, 1)).shape
        ((4, 2, 3), (4, 2, 3))

        """
        ltens = LocalTensors((_local_transpose(tens, axes) for tens in self.lt),
                             cform=self.canonical_form)
        return type(self)(ltens)
Project: MatchZoo    Author: faneshion    | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        d1len = len(self.data1[t1])
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[self.data1[t1]]
            t2_rep = self.embed[self.data2[t2]]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo    Author: faneshion    | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo    Author: faneshion    | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            curr_pair_feats = list(self.hist_feats[(t1, t2)])
            caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo    Author: faneshion    | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: MatchZoo    Author: faneshion    | project source | file source
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
        mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
        t1_cont = list(self.data1[t1])
        t2_cont = list(self.data2[t2])
        d1len = len(t1_cont)
        if self.use_hist_feats:
            assert (t1, t2) in self.hist_feats
            curr_pair_feats = list(self.hist_feats[(t1, t2)])
            caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size))
            if d1len < data1_maxlen:
                mhist[:d1len, :] = caled_hist[:, :]
            else:
                mhist[:, :] = caled_hist[:data1_maxlen, :]
        else:
            t1_rep = self.embed[t1_cont]
            t2_rep = self.embed[t2_cont]
            mm = t1_rep.dot(np.transpose(t2_rep))
            for (i,j), v in np.ndenumerate(mm):
                if i >= data1_maxlen:
                    break
                vid = int((v + 1.) / 2. * ( hist_size - 1.))
                mhist[i][vid] += 1.
            mhist += 1.
            mhist = np.log10(mhist)
        return mhist
Project: AerialCrackDetection_Keras    Author: TTMRonald    | project source | file source
def format_img(img, C):
    img_min_side = float(C.im_size)
    (height,width,_) = img.shape

    if width <= height:
        f = img_min_side/width
        new_height = int(f * height)
        new_width = int(img_min_side)
    else:
        f = img_min_side/height
        new_width = int(f * width)
        new_height = int(img_min_side)
    fx = width/float(new_width)
    fy = height/float(new_height)
    img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    img = img[:, :, (2, 1, 0)]
    img = img.astype(np.float32)
    img[:, :, 0] -= C.img_channel_mean[0]
    img[:, :, 1] -= C.img_channel_mean[1]
    img[:, :, 2] -= C.img_channel_mean[2]
    img /= C.img_scaling_factor
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return img, fx, fy
Project: nn4nlp-code    Author: neubig    | project source | file source
def calc_score_of_histories(words, dropout=0.0):
  # This will change from a list of histories, to a list of words in each history position
  words = np.transpose(words)
  # Lookup the embeddings and concatenate them
  emb = dy.concatenate([dy.lookup_batch(W_emb, x) for x in words])
  # Create the hidden layer
  W_h = dy.parameter(W_h_p)
  b_h = dy.parameter(b_h_p)
  h = dy.tanh(dy.affine_transform([b_h, W_h, emb]))
  # Perform dropout
  if dropout != 0.0:
    h = dy.dropout(h, dropout)
  # Calculate the score and return
  W_sm = dy.parameter(W_sm_p)
  b_sm = dy.parameter(b_sm_p)
  return dy.affine_transform([b_sm, W_sm, h])

# Calculate the loss value for the entire sentence
Project: droppy    Author: BV-DR    | project source | file source
def reSample( df , dt = None , xAxis = None , n = None , kind = 'linear') :
   """ re-sample the signal """

   if type(df) == pd.Series : df = pd.DataFrame(df)

   f = interp1d( df.index, np.transpose(df.values) , kind=kind, axis=-1, copy=True, bounds_error=True, assume_sorted=True)
   if dt :
      end = int(+(df.index[-1] - df.index[0] ) / dt)  * dt +  df.index[0]
      xAxis = np.linspace( df.index[0] , end , 1+int(+(end - df.index[0] ) / dt) )
   elif n :
      xAxis = np.linspace( df.index[0] ,  df.index[-1] , n )
   elif xAxis is None :
      raise(Exception("reSample : either dt or xAxis should be provided" ))

   #For rounding issue, ensure that xAxis is within ts.xAxis
   #xAxis[ np.where( xAxis > np.max(df.index[:]) ) ] = df.index[ np.where( xAxis > np.max(df.index[:]) ) ]
   return pd.DataFrame( data = np.transpose(f(xAxis)), index = xAxis , columns = map( lambda x : "reSample("+ x +")" , df.columns  ) )
Project: droppy    Author: BV-DR    | project source | file source
def getPSD( df , dw = 0.05, roverlap = 0.5, window='hanning', detrend='constant') :
   """
      Compute the power spectral density
   """

   if type(df) == pd.Series : df = pd.DataFrame(df)

   nfft = int ( (2*pi / dw) / dx(df) )
   nperseg = 2**int(log(nfft)/log(2))
   noverlap = int(nperseg * roverlap)

   """ Return the PSD of a time signal """
   try : 
      from scipy.signal import welch
   except :
      raise Exception("Welch function not found, please install scipy > 0.12")

   data = []
   for iSig in range(df.shape[1]) :
      test = welch( df.values[:,iSig]  , fs = 1. / dx(df) , window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=True, scaling='density')
      data.append( test[1] / (2*pi) )
   xAxis = test[0][:] * 2*pi
   return pd.DataFrame( data = np.transpose(data), index = xAxis , columns = [ "psd("+ str(x) +")" for  x in df.columns ]  )
Project: droppy    Author: BV-DR    | project source | file source
def derivFFT(df, n=1  ) :
   """ Deriv a signal trought FFT, warning, edge can be a bit noisy...
   indexList : channel to derive
   n : order of derivation
   """
   deriv = []
   for iSig in range(df.shape[1]) :
      fft = np.fft.fft( df.values[:,iSig] )   #FFT
      freq = np.fft.fftfreq( df.shape[0] , dx(df) )

      from copy import deepcopy
      fft0 = deepcopy(fft)
      if n>0 :
         fft *= (1j * 2*pi* freq[:])**n                    #Derivation in frequency domain
      else :
         fft[-n:] *= (1j * 2*pi* freq[-n:])**n
         fft[0:-n] = 0.

      tts = np.real(np.fft.ifft(fft))
      tts -= tts[0]
      deriv.append( tts )    #Inverse FFT

   return pd.DataFrame( data = np.transpose(deriv), index = df.index , columns = [ "DerivFFT("+ x +")" for x in df.columns ]  )
Project: autonomio    Author: autonomio    | project source | file source
def _starts_with_output(data, col):

    '''

    Helper function for to_integers in cases where
    the feature is categorized based on a common
    first character of a string.

    '''

    data[col] = data[col].fillna('0')
    temp_df = _category_starts_with(data, col)
    temp_df['start_char'] = temp_df[0]
    temp_df = temp_df.drop(0, axis=1)
    reference_df = temp_df.set_index('start_char').transpose()
    temp_list = []
    for i in range(len(data[col])):
        for c in temp_df['start_char']:
            if data[col][i].startswith(c):
                temp_list.append(reference_df[c][0])
    if len(data[col]) != len(temp_list):
        print("AUTONOMIO ERROR: length of input and output do not match")
    else:
        return pd.Series(temp_list)
Project: seam_carving    Author: dharness    | project source | file source
def reduce_height(img4, eng):
    """
    Reduces the height by 1 pixel

    Args:
        img4 (n,m,4 numpy matrix): RGB image with additional mask layer.
        eng (n,m numpy matrix): Pre-computed energy matrix for supplied image.

    Returns:
        tuple (
            n,1 numpy matrix: the removed seam,
            n-1,m,4 numpy matrix: The height-reduced image,
            float: The cost of the seam removed
        )
    """
    flipped_eng = np.transpose(eng)
    flipped_img4 = np.transpose(img4, (1, 0, 2))
    flipped_seam, reduced_flipped_img4, cost = reduce_width(flipped_img4, flipped_eng)
    return (
        np.transpose(flipped_seam),
        np.transpose(reduced_flipped_img4, (1, 0, 2)),
        cost
    )
Project: keras-frcnn    Author: yhenon    | project source | file source
def format_img(img, C):
    img_min_side = float(C.im_size)
    (height,width,_) = img.shape

    if width <= height:
        f = img_min_side/width
        new_height = int(f * height)
        new_width = int(img_min_side)
    else:
        f = img_min_side/height
        new_width = int(f * width)
        new_height = int(img_min_side)
    fx = width/float(new_width)
    fy = height/float(new_height)
    img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
    img = img[:, :, (2, 1, 0)]
    img = img.astype(np.float32)
    img[:, :, 0] -= C.img_channel_mean[0]
    img[:, :, 1] -= C.img_channel_mean[1]
    img[:, :, 2] -= C.img_channel_mean[2]
    img /= C.img_scaling_factor
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return img, fx, fy
Project: DBCV    Author: christopherjenness    | project source | file source
def _mutual_reach_dist_MST(dist_tree):
    """
    Computes minimum spanning tree of the mutual reach distance complete graph

    Args:
        dist_tree (np.ndarray): array of dimensions (n_samples, n_samples)
            Graph of all pair-wise mutual reachability distances
            between points.

    Returns: minimum_spanning_tree (np.ndarray)
        array of dimensions (n_samples, n_samples)
        minimum spanning tree of all pair-wise mutual reachability
            distances between points.
    """
    mst = minimum_spanning_tree(dist_tree).toarray()
    return mst + np.transpose(mst)
Project: discretize    Author: simpeg    | project source | file source
def writeModelUBC(mesh, fileName, model):
        """Writes a model associated with a TensorMesh
        to a UBC-GIF format model file.

        :param string fileName: File to write to
        :param numpy.ndarray model: The model
        """

        # Reshape model to a matrix
        modelMat = mesh.r(model, 'CC', 'CC', 'M')
        # Transpose the axes
        modelMatT = modelMat.transpose((2, 0, 1))
        # Flip z to positive down
        modelMatTR = utils.mkvc(modelMatT[::-1, :, :])

        np.savetxt(fileName, modelMatTR.ravel())
Project: radar    Author: amoose136    | project source | file source
def test_inner_product_with_various_contiguities(self):
        # github issue 6532
        for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
            # check an inner product involving a matrix transpose
            A = np.array([[1, 2], [3, 4]], dtype=dt)
            B = np.array([[1, 3], [2, 4]], dtype=dt)
            C = np.array([1, 1], dtype=dt)
            desired = np.array([4, 6], dtype=dt)
            assert_equal(np.inner(A.T, C), desired)
            assert_equal(np.inner(C, A.T), desired)
            assert_equal(np.inner(B, C), desired)
            assert_equal(np.inner(C, B), desired)
            # check a matrix product
            desired = np.array([[7, 10], [15, 22]], dtype=dt)
            assert_equal(np.inner(A, B), desired)
            # check the syrk vs. gemm paths
            desired = np.array([[5, 11], [11, 25]], dtype=dt)
            assert_equal(np.inner(A, A), desired)
            assert_equal(np.inner(A, A.copy()), desired)
            # check an inner product involving an aliased and reversed view
            a = np.arange(5).astype(dt)
            b = a[::-1]
            desired = np.array(10, dtype=dt).item()
            assert_equal(np.inner(b, a), desired)
Project: radar    Author: amoose136    | project source | file source
def test_TakeTransposeInnerOuter(self):
        # Test of take, transpose, inner, outer products
        x = arange(24)
        y = np.arange(24)
        x[5:6] = masked
        x = x.reshape(2, 3, 4)
        y = y.reshape(2, 3, 4)
        assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
        assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
        assert_equal(np.inner(filled(x, 0), filled(y, 0)),
                     inner(x, y))
        assert_equal(np.outer(filled(x, 0), filled(y, 0)),
                     outer(x, y))
        y = array(['abc', 1, 'def', 2, 3], object)
        y[2] = masked
        t = take(y, [0, 3, 4])
        assert_(t[0] == 'abc')
        assert_(t[1] == 2)
        assert_(t[2] == 3)
Project: radar    Author: amoose136    | project source | file source
def test_generic_methods(self):
        # Tests some MaskedArray methods.
        a = array([1, 3, 2])
        assert_equal(a.any(), a._data.any())
        assert_equal(a.all(), a._data.all())
        assert_equal(a.argmax(), a._data.argmax())
        assert_equal(a.argmin(), a._data.argmin())
        assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))
        assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))
        assert_equal(a.conj(), a._data.conj())
        assert_equal(a.conjugate(), a._data.conjugate())

        m = array([[1, 2], [3, 4]])
        assert_equal(m.diagonal(), m._data.diagonal())
        assert_equal(a.sum(), a._data.sum())
        assert_equal(a.take([1, 2]), a._data.take([1, 2]))
        assert_equal(m.transpose(), m._data.transpose())
Project: radar    Author: amoose136    | project source | file source
def test_testTakeTransposeInnerOuter(self):
        # Test of take, transpose, inner, outer products
        x = arange(24)
        y = np.arange(24)
        x[5:6] = masked
        x = x.reshape(2, 3, 4)
        y = y.reshape(2, 3, 4)
        assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
        assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
        assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
                   inner(x, y)))
        assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
                   outer(x, y)))
        y = array(['abc', 1, 'def', 2, 3], object)
        y[2] = masked
        t = take(y, [0, 3, 4])
        assert_(t[0] == 'abc')
        assert_(t[1] == 2)
        assert_(t[2] == 3)
Project: radar    Author: amoose136    | project source | file source
def test_testArrayMethods(self):
        a = array([1, 3, 2])
        self.assertTrue(eq(a.any(), a._data.any()))
        self.assertTrue(eq(a.all(), a._data.all()))
        self.assertTrue(eq(a.argmax(), a._data.argmax()))
        self.assertTrue(eq(a.argmin(), a._data.argmin()))
        self.assertTrue(eq(a.choose(0, 1, 2, 3, 4),
                           a._data.choose(0, 1, 2, 3, 4)))
        self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
        self.assertTrue(eq(a.conj(), a._data.conj()))
        self.assertTrue(eq(a.conjugate(), a._data.conjugate()))
        m = array([[1, 2], [3, 4]])
        self.assertTrue(eq(m.diagonal(), m._data.diagonal()))
        self.assertTrue(eq(a.sum(), a._data.sum()))
        self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2])))
        self.assertTrue(eq(m.transpose(), m._data.transpose()))
Project: radar    Author: amoose136    | project source | file source
def test_4(self):
        """
        Test of take, transpose, inner, outer products.

        """
        x = self.arange(24)
        y = np.arange(24)
        x[5:6] = self.masked
        x = x.reshape(2, 3, 4)
        y = y.reshape(2, 3, 4)
        assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
        assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
        assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
                            self.inner(x, y))
        assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
                            self.outer(x, y))
        y = self.array(['abc', 1, 'def', 2, 3], object)
        y[2] = self.masked
        t = self.take(y, [0, 3, 4])
        assert t[0] == 'abc'
        assert t[1] == 2
        assert t[2] == 3
Project: radar    Author: amoose136    | project source | file source
def test_basic(self):
        import numpy.linalg as linalg

        A = np.array([[1., 2.],
                      [3., 4.]])
        mA = matrix(A)
        assert_(np.allclose(linalg.inv(A), mA.I))
        assert_(np.all(np.array(np.transpose(A) == mA.T)))
        assert_(np.all(np.array(np.transpose(A) == mA.H)))
        assert_(np.all(A == mA.A))

        B = A + 2j*A
        mB = matrix(B)
        assert_(np.allclose(linalg.inv(B), mB.I))
        assert_(np.all(np.array(np.transpose(B) == mB.T)))
        assert_(np.all(np.array(np.transpose(B).conj() == mB.H)))
Project: MIL.pytorch    Author: gujiuxiang    | project source | file source
def preprocess_vgg19_mil(Image):
    if len(Image.shape) == 2:
        Image = Image[:, :, np.newaxis]
        Image = np.concatenate((Image, Image, Image), axis=2)

    mean = np.array([[[103.939, 116.779, 123.68]]]);
    base_image_size = 565;
    Image = cv2.resize(np.transpose(Image, axes=(1, 2, 0)), (base_image_size, base_image_size), interpolation=cv2.INTER_CUBIC)
    Image_orig = Image.astype(np.float32, copy=True)
    Image_orig -= mean
    im = Image_orig
    #im, gr, grr = upsample_image(Image_orig, base_image_size)
    # im = cv2.resize(Image_orig, (base_image_size, base_image_size), interpolation=cv2.INTER_CUBIC)
    im = np.transpose(im, axes=(2, 0, 1))
    im = im[np.newaxis, :, :, :]
    return im
Project: MIL.pytorch    Author: gujiuxiang    | project source | file source
def test_img(im, net, base_image_size, means):
    """
    Calls Caffe to get output for this image
    """
    batch_size = 1
    # Resize image
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= means

    im, gr, grr = upsample_image(im_orig, base_image_size)
    im = np.transpose(im, axes=(2, 0, 1))
    im = im[np.newaxis, :, :, :]

    # Pass into model
    mil_prob = net(Variable(torch.from_numpy(im), requires_grad=False).cuda())
    return mil_prob
Project: TDOSE    Author: kasperschmidt    | project source | file source
def build_2D_cov_matrix(sigmax,sigmay,angle,verbose=True):
    """
    Build a covariance matrix for a 2D multivariate Gaussian

    --- INPUT ---
    sigmax          Standard deviation of the x-component of the multivariate Gaussian
    sigmay          Standard deviation of the y-component of the multivariate Gaussian
    angle           Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
    verbose         Toggle verbosity
    --- EXAMPLE OF USE ---
    import tdose_utilities as tu
    covmatrix = tu.build_2D_cov_matrix(3,1,35)

    """
    if verbose: print(' - Build 2D covariance matrix with variances (x,y)=('+str(sigmax)+','+str(sigmay)+
                      ') and then rotated '+str(angle)+' degrees')
    cov_orig      = np.zeros([2,2])
    cov_orig[0,0] = sigmay**2.0
    cov_orig[1,1] = sigmax**2.0

    angle_rad     = (180.0-angle) * np.pi/180.0 # The (180-angle) makes sure the same convention as DS9 is used
    c, s          = np.cos(angle_rad), np.sin(angle_rad)
    rotmatrix     = np.matrix([[c, -s], [s, c]])

    cov_rot       = np.dot(np.dot(rotmatrix,cov_orig),np.transpose(rotmatrix))  # performing rot * cov * rot^T

    return cov_rot
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: FaceSwap    Author: Aravind-Suresh    | project source | file source
def get_face_mask(img, img_l):
    img = np.zeros(img.shape[:2], dtype = np.float64)

    for idx in OVERLAY_POINTS_IDX:
        cv2.fillConvexPoly(img, cv2.convexHull(img_l[idx]), color = 1)

    img = np.array([img, img, img]).transpose((1, 2, 0))
    img = (cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0) > 0) * 1.0
    img = cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0)

    return img
Project: FaceSwap    Author: Aravind-Suresh    | project source | file source
def get_tm_opp(pts1, pts2):
    # Transformation matrix - ( Translation + Scaling + Rotation )
    # using Procrustes analysis
    pts1 = np.float64(pts1)
    pts2 = np.float64(pts2)

    m1 = np.mean(pts1, axis = 0)
    m2 = np.mean(pts2, axis = 0)

    # Removing translation
    pts1 -= m1
    pts2 -= m2

    std1 = np.std(pts1)
    std2 = np.std(pts2)
    std_r = std2/std1

    # Removing scaling
    pts1 /= std1
    pts2 /= std2

    U, S, V = np.linalg.svd(np.transpose(pts1) * pts2)

    # Finding the rotation matrix
    R = np.transpose(U * V)

    return np.vstack([np.hstack((std_r * R,
        np.transpose(m2) - std_r * R * np.transpose(m1))), np.matrix([0.0, 0.0, 1.0])])
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def show_heatmap(x, y, attention):
    #print attention[:len(y),:len(x)]
    #print attention[:len(y),:len(x)].shape
    #data = np.transpose(attention[:len(y),:len(x)])
    data = attention[:len(y),:len(x)]
    x, y = y, x

    #ax = plt.axes(aspect=0.4)
    ax = plt.axes()
    heatmap = plt.pcolor(data, cmap=plt.cm.Blues)

    xticks = np.arange(len(y)) + 0.5
    xlabels = y
    yticks = np.arange(len(x)) + 0.5
    ylabels = x
    plt.xticks(xticks, xlabels, rotation='vertical')
    ax.set_yticks(yticks)
    ax.set_yticklabels(ylabels)

    # make it look less like a scatter plot and more like a colored table
    ax.tick_params(axis='both', length=0)
    ax.invert_yaxis()
    ax.xaxis.tick_top()

    plt.colorbar(heatmap)

    plt.show()
    #plt.savefig('./attention-out.pdf')
Project: RasterFairy    Author: Quasimondo    | project source | file source
def alterneigh(self, alpha, rad, i, b, g, r):
        if i-rad >= self.SPECIALS-1:
            lo = i-rad
            start = 0
        else:
            lo = self.SPECIALS-1
            start = (self.SPECIALS-1 - (i-rad))

        if i+rad <= self.NETSIZE:
            hi = i+rad
            end = rad*2-1
        else:
            hi = self.NETSIZE
            end = (self.NETSIZE - (i+rad))

        a = self.geta(alpha, rad)[start:end]

        p = self.network[lo+1:hi]
        p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)

    #def contest(self, b, g, r):
    #    """ Search for biased BGR values
    #            Finds closest neuron (min dist) and updates self.freq
    #            finds best neuron (min dist-self.bias) and returns position
    #            for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
    #            self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
    #
    #    i, j = self.SPECIALS, self.NETSIZE
    #    dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
    #    bestpos = i + np.argmin(dists)
    #    biasdists = dists - self.bias[i:j]
    #    bestbiaspos = i + np.argmin(biasdists)
    #    self.freq[i:j] -= self.BETA * self.freq[i:j]
    #    self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
    #    self.freq[bestpos] += self.BETA
    #    self.bias[bestpos] -= self.BETAGAMMA
    #    return bestbiaspos
Project: pytorch-semseg    Author: meetshah1995    | project source | file source
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
Project: pytorch-semseg    Author: meetshah1995    | project source | file source
def transform(self, img, lbl):
        """transform

        :param img:
        :param lbl:
        """
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)

        if not np.all(classes == np.unique(lbl)):
            print("WARN: resizing labels yielded fewer classes")

        if not np.all(np.unique(lbl) < self.n_classes):
            raise ValueError("Segmentation map contained invalid class values")

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()

        return img, lbl
Project: squeezeDet-hand    Author: fyhtea    | project source | file source
def set_anchors(mc):
  H, W, B = 13, 18, 9
  anchor_shapes = np.reshape(
      [np.array(
          [[  36.,  37.], [ 366., 174.], [ 115.,  59.],
           [ 162.,  87.], [  38.,  90.], [ 258., 173.],
           [ 224., 108.], [  78., 170.], [  72.,  43.]])] * H * W,
      (H, W, B, 2)
  )
  center_x = np.reshape(
      np.transpose(
          np.reshape(
              np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B),
              (B, H, W)
          ),
          (1, 2, 0)
      ),
      (H, W, B, 1)
  )
  center_y = np.reshape(
      np.transpose(
          np.reshape(
              np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),
              (B, W, H)
          ),
          (2, 1, 0)
      ),
      (H, W, B, 1)
  )
  anchors = np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )

  return anchors
Project: squeezeDet-hand    Author: fyhtea    | project source | file source
def set_anchors(mc):
  H, W, B = 14, 19, 9
  anchor_shapes = np.reshape(
      [np.array(
          [[  36.,  37.], [ 366., 174.], [ 115.,  59.],
           [ 162.,  87.], [  38.,  90.], [ 258., 173.],
           [ 224., 108.], [  78., 170.], [  72.,  43.]])] * H * W,
      (H, W, B, 2)
  )
  center_x = np.reshape(
      np.transpose(
          np.reshape(
              np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B),
              (B, H, W)
          ),
          (1, 2, 0)
      ),
      (H, W, B, 1)
  )
  center_y = np.reshape(
      np.transpose(
          np.reshape(
              np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),
              (B, W, H)
          ),
          (2, 1, 0)
      ),
      (H, W, B, 1)
  )
  anchors = np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )

  return anchors
Project: MMD-Variational-Autoencoder    Author: ShengjiaZhao    | project source | file source
def convert_to_display(samples):
    cnt, height, width = int(math.floor(math.sqrt(samples.shape[0]))), samples.shape[1], samples.shape[2]
    samples = np.transpose(samples, axes=[1, 0, 2, 3])
    samples = np.reshape(samples, [height, cnt, cnt, width])
    samples = np.transpose(samples, axes=[1, 0, 2, 3])
    samples = np.reshape(samples, [height*cnt, width*cnt])
    return samples