Python numpy module: argmin() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.argmin().
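A quick refresher before the project examples: np.argmin returns the index of the smallest element, over the flattened array by default or along a given axis, and ties resolve to the first occurrence. A minimal sketch:

import numpy as np

a = np.array([[4, 2, 7],
              [1, 9, 1]])
np.argmin(a)           # 3 -- index into the flattened array [4, 2, 7, 1, 9, 1]
np.argmin(a, axis=0)   # array([1, 0, 1]) -- row of the minimum in each column
np.argmin(a, axis=1)   # array([1, 0]) -- column of the minimum per row; the tie in row 1 goes to the first occurrence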

Project: untwist    Author: IoSR-Surrey
def fftfilt(b, x, *n):
    # Overlap-add FFT filtering of signal x with FIR filter b. The optional *n
    # (a user-supplied FFT length) is accepted for compatibility but unused here.
    N_x = len(x)
    N_b = len(b)
    # Candidate power-of-two FFT lengths and the overlap-add cost of each:
    N = 2**np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
    cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
    N_fft = int(N[np.argmin(cost)])  # pick the cheapest FFT length
    # Compute the block length:
    L = int(N_fft - N_b + 1)
    # Compute the transform of the filter:
    H = np.fft.fft(b,N_fft)
    y = np.zeros(N_x, x.dtype)
    i = 0
    while i <= N_x:
        il = np.min([i+L,N_x])
        k = np.min([i+N_fft,N_x])
        yt = np.fft.ifft(np.fft.fft(x[i:il],N_fft)*H,N_fft) # Overlap..
        y[i:k] = y[i:k] + yt[:k-i]                          # and add
        i += L
    return y
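As a sanity check (hypothetical usage, with a made-up filter and signal): for a real FIR filter b, fftfilt(b, x) should match direct linear convolution truncated to len(x).

import numpy as np

b = np.ones(8) / 8.0                  # simple moving-average FIR filter
x = np.random.randn(4096)
y_fft = fftfilt(b, x)
y_ref = np.convolve(b, x)[:len(x)]    # direct convolution, truncated to the input length
assert np.allclose(y_fft, y_ref)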
Project: RasterFairy    Author: Quasimondo
def contest(self, b, g, r):
        """ Search for biased BGR values
                Finds closest neuron (min dist) and updates self.freq
                finds best neuron (min dist-self.bias) and returns position
                for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
                self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
        i, j = self.SPECIALS, self.NETSIZE
        dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
        bestpos = i + np.argmin(dists)
        biasdists = dists - self.bias[i:j]
        bestbiaspos = i + np.argmin(biasdists)
        self.freq[i:j] *= (1-self.BETA)
        self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
        self.freq[bestpos] += self.BETA
        self.bias[bestpos] -= self.BETAGAMMA
        return bestbiaspos
Project: RasterFairy    Author: Quasimondo
def rasterMaskToGrid( rasterMask ):
    grid = []
    mask = rasterMask['mask']
    for y in range(rasterMask['height']):
        for x in range(rasterMask['width']):
            if mask[y,x]==0:
                grid.append([x,y])

    grid = np.array(grid, dtype=float)  # np.float was removed in NumPy >= 1.24
    if rasterMask['hex'] is True:  # rasterMask was already dereferenced above, so no None check is needed
        f = math.sqrt(3.0)/2.0 
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for i in range(len(grid)):
            if (grid[i][1]%2.0==0.0):
                grid[i][0]-=offset
            grid[i][1] *= f
    return grid
Project: spyking-circus    Author: spyking-circus
def update_waveforms(self):

        self.waveforms_ax.clear()

        for idx, p in enumerate(self.to_consider[list(self.inspect_templates)]):
            tmp   = self.templates[:, p]
            tmp   = tmp.toarray().reshape(self.N_e, self.N_t)
            elec  = numpy.argmin(numpy.min(tmp, 1))
            thr   = self.thresholds[elec]

            if self.ui.show_peaks.isChecked():
                indices = [self.inv_nodes[self.nodes[elec]]]
            else:
                indices = self.inv_nodes[self.edges[self.nodes[elec]]]

            for sidx in indices:
                xaxis = numpy.linspace(self.x_position[sidx], self.x_position[sidx] + (self.N_t/(self.sampling_rate*1e-3)), self.N_t)
                self.waveforms_ax.plot(xaxis, self.y_position[sidx] + tmp[sidx], c=colorConverter.to_rgba(self.inspect_colors_templates[idx]))
                #self.waveforms_ax.plot([0, xaxis[-1]], [-thr, -thr], c=colorConverter.to_rgba(self.inspect_colors_templates[idx]), linestyle='--')

        self.waveforms_ax.set_xlabel('Probe Space')
        self.waveforms_ax.set_ylabel('Probe Space')

        for fig in [self.ui.waveforms]:
            fig.draw_idle()
Project: human-rl    Author: gsastry
def run_forests():    
    print('random forest: \n')   
    params = []
    scores = []

    for _ in range(5):
        max_features = np.random.randint(400,800)
        max_depth = np.random.choice([None, None, None, None, 30, 40, 60])
        forest = RandomForestClassifier(n_estimators=50,
                                        max_features=max_features,
                                        max_depth=max_depth)                                   
        forest_fit = forest.fit(X_train, Y_train)
        pred = forest_fit.predict(X_test)
        print('\n params:', dict(max_features=max_features, max_depth=max_depth))
        print('forest train: ',zero_one_score(Y_train, forest_fit.predict(X_train)), ' test: ',
                  zero_one_score(Y_test, pred))

        params.append( (max_features, max_depth) )
        scores.append( zero_one_score(Y_test, pred))

    print('best:', params[np.argmin(scores)])
Project: dl4mt-multi    Author: nyu-dl
def _translate(seq, f_init, f_next, trg_eos_idx, src_sel, trg_sel,
               k, cond_init_trg, normalize, n_best, **kwargs):
    sample, score = gen_sample(
        f_init, f_next, x=numpy.array(seq).reshape([len(seq), 1]),
        eos_idx=trg_eos_idx, src_selector=src_sel, trg_selector=trg_sel,
        k=k, maxlen=3*len(seq), stochastic=False, argmax=False,
        cond_init_trg=cond_init_trg, **kwargs)
    if normalize:
        lengths = numpy.array([len(s) for s in sample])
        score = score / lengths
    if n_best == 1:
        sidx = numpy.argmin(score)
    elif n_best > 1:
        sidx = numpy.argsort(score)[:n_best]
    else:
        raise ValueError('n_best must be at least 1!')
    return sample[sidx], score[sidx]
Project: conec    Author: cod3licious
def doesnt_match(self, words):
        """
        Which word from the given list doesn't go with the others?

        Example::
          >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
          'cereal'
        """
        words = [word for word in words if word in self.vocab]  # filter out OOV words
        logger.debug("using words %s" % words)
        if not words:
            raise ValueError("cannot select a word from an empty list")
        # which word vector representation is furthest away from the mean?
        selection = self.syn0norm[[self.vocab[word].index for word in words]]
        mean = np.mean(selection, axis=0)
        sim = np.dot(selection, mean / np.linalg.norm(mean))
        return words[np.argmin(sim)]
Project: pdnn    Author: petered
def temporalize(x, smoothing_steps, distance='L1'):
    """
    :param x: An (n_samples, n_dims) dataset
    :param smoothing_steps: The number of upcoming samples to search when choosing the next index
    :param distance: 'L1' or 'L2' -- the metric used to compare samples
    :return: A (n_samples, ) array of indexes that can be used to shuffle the input for temporal smoothness.
    """
    x_flat = x.reshape(x.shape[0], -1)
    index_buffer = np.arange(1, smoothing_steps+1)
    next_sample_buffer = x_flat[1:smoothing_steps+1].copy()
    # Technically, we could do this without a next_sample_buffer (and only an index_buffer), but it would require
    # repeatedly accessing a bunch of really scattered memory, so we do it this way.
    shuffling_indices = np.zeros(len(x), dtype=int)
    rectifier = np.abs if distance=='L1' else np.square if distance=='L2' else bad_value(distance)
    p=ProgressIndicator(len(x), name = 'Temporalize')
    current_index = 0
    for i in range(len(x)):
        shuffling_indices[i] = current_index
        closest = np.argmin(rectifier(x_flat[current_index]-next_sample_buffer).sum(axis=1))
        current_index = index_buffer[closest]
        weve_aint_done_yet = i+smoothing_steps+1 < len(x)
        next_index = i+smoothing_steps+1
        next_sample_buffer[closest] = x_flat[next_index] if weve_aint_done_yet else float('inf')
        index_buffer[closest] = next_index if weve_aint_done_yet else -1
        p()
    return shuffling_indices
Project: invo    Author: rafidrm
def _solveHyperplaneProjection(self, points):
        m, n = self.A.shape
        errors = np.zeros(m)
        for i in range(m):
            if i in self.ban_constraints:
                errors[i] = 9999999
            else:
                ai = self.A[i] / np.linalg.norm(self.A[i].T, self.normalize_c)
                bi = self.b[i] / np.linalg.norm(self.A[i].T, self.normalize_c)
                errors[i] = np.sum([ai * pt - bi for pt in points])
        minInd = np.argmin(errors)
        self.c = self.A[minInd] / np.linalg.norm(self.A[minInd].T,
                                                 self.normalize_c)
        self.c = self.c.tolist()[0]
        self.error = errors[minInd]
        self.dual = np.zeros(m)
        self.dual[minInd] = 1 / np.linalg.norm(self.A[minInd].T,
                                               self.normalize_c)
        self._solved = True
        return errors[minInd]
Project: code    Author: ActiveState
def get_min_pos_kinect():

    (depth,_) = get_depth()

    minVal = np.min(depth) #This is the minimum value from the depth image
    minPos = np.argmin(depth) #This is the raw index of the minimum value above
    xPos = np.mod(minPos, xSize) #This is the x component of the raw index
    yPos = minPos//xSize #This is the y component of the raw index

    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))

    return (xSize - xPos-10, yPos, minVal)
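An aside on the mod/floor-divide pair above: np.unravel_index performs the same flat-to-2D conversion in one call. A minimal sketch, assuming a row-major depth frame of shape (ySize, xSize):

import numpy as np

depth = np.random.randint(400, 2000, size=(480, 640))   # hypothetical depth frame
min_pos = np.argmin(depth)                               # flat index of the minimum
y_pos, x_pos = np.unravel_index(min_pos, depth.shape)    # same as (min_pos // xSize, np.mod(min_pos, xSize))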
Project: code-uai16    Author: thanhan
def al_just_expert(adata, clf, thresh_random = 3):
    n = len(adata.taken_items)        # examples taken
    m = adata.mat.shape[0]          # examples available
    if m < 1: return -1
    if n < thresh_random or not adata.taken_both_classes():
        i = random.randint(0, m-1)
        return adata.query_expert_direct(i)

    # uncertainty sampling

    # undersample:
    #mat, rel = undersam(adata.taken_mat.tocsr(), adata.taken_rel)
    #clf.fit(mat, rel)

    clf.fit(adata.taken_mat, adata.taken_rel)
    pp = clf.predict_proba(adata.mat)
    uncertain = np.abs(pp[:,0] - 0.5)

    i = np.argmin(uncertain)
    j = np.argmin(pp[:,0])  # index of the most confidently relevant example (unused here)
    #print pp[i,0]
    return adata.query_expert_direct(i)
Project: code-uai16    Author: thanhan
def al_crowd_fin_expert(adata, clf, turk_uncer, crowd_budget = 5*1500):
    if adata.spent_crowd < crowd_budget and len(adata.rel) > 0:
        res = al_just_crowd(adata, clf)
        if res != -1: return res

    print "q expert"
    n = len(adata.taken_items)
    crowd_prob = np.zeros(n)
    found = False
    for i in range(n):
        if not adata.expert_fixed[i]:
            found = True
            j = adata.taken_items[i]
            crowd_prob[i] = turk_uncer[j][0] *1.0/ (turk_uncer[j][0] + turk_uncer[j][1])
        else:
            crowd_prob[i] = 100

    if not found: return -1

    uncertain = np.abs(crowd_prob - 0.5)
    i = np.argmin(uncertain)

    #print i, adata.expert_fixed[i]
    print "most", turk_uncer[adata.taken_items[i]]
    return adata.query_expert_fix(i)
Project: sand-glyphs    Author: inconvergent
def _spatial_sort(glyph):
  from scipy.spatial.distance import cdist
  from numpy import argsort
  from numpy import argmin

  curr = argmin(glyph[:,0])
  visited = set([curr])
  order = [curr]

  dd = cdist(glyph, glyph)

  while len(visited)<len(glyph):
    row = dd[curr,:]

    for i in argsort(row):
      if row[i]<=0.0 or i==curr or i in visited:
        continue
      order.append(i)
      visited.add(i)
      curr = i  # move the walk to the newly visited point; without this the search never advances
      break
  glyph[:,:] = glyph[order,:]
Project: bambi    Author: bambinos
def _hpd_interval(self, x, width):
        """
        Code adapted from pymc3.stats.calc_min_interval:
        https://github.com/pymc-devs/pymc3/blob/master/pymc3/stats.py
        """
        x = np.sort(x)
        n = len(x)

        interval_idx_inc = int(np.floor(width * n))
        n_intervals = n - interval_idx_inc
        interval_width = x[interval_idx_inc:] - x[:n_intervals]

        if len(interval_width) == 0:
            raise ValueError('Too few elements for interval calculation')

        min_idx = np.argmin(interval_width)
        hdi_min = x[min_idx]
        hdi_max = x[min_idx + interval_idx_inc]

        index = ['hpd{}_{}'.format(width, side) for side in ['lower', 'upper']]  # avoid shadowing x
        return pd.Series([hdi_min, hdi_max], index=index)
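The same shortest-interval idea as a standalone sketch (names are illustrative, not part of bambi): sort the sample, slide a window that always contains `width` of the points, and keep the narrowest one.

import numpy as np

def hpd(sample, width=0.95):
    x = np.sort(np.asarray(sample))
    n = len(x)
    inc = int(np.floor(width * n))    # number of points the interval must contain
    widths = x[inc:] - x[:n - inc]    # width of every candidate window
    lo = np.argmin(widths)            # left edge of the narrowest window
    return x[lo], x[lo + inc]

hpd(np.random.randn(100000))          # roughly (-1.96, 1.96) for a standard normal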
Project: CycleGAN-Tensorflow-PyTorch-Simple    Author: LynnHo
def contest(self, b, g, r):
        """ Search for biased BGR values
                Finds closest neuron (min dist) and updates self.freq
                finds best neuron (min dist-self.bias) and returns position
                for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
                self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
        i, j = self.SPECIALS, self.NETSIZE
        dists = abs(self.network[i:j] - np.array([b, g, r])).sum(1)
        bestpos = i + np.argmin(dists)
        biasdists = dists - self.bias[i:j]
        bestbiaspos = i + np.argmin(biasdists)
        self.freq[i:j] *= (1 - self.BETA)
        self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
        self.freq[bestpos] += self.BETA
        self.bias[bestpos] -= self.BETAGAMMA
        return bestbiaspos
Project: monogreedy    Author: jinjunqi
def tune_tal(mono_phi_score, tal_list):
    errs = []
    tals = []
    for tal in tal_list:
        err = []
        for i in range(len(mono_phi_score)):
            mono_1 = numpy.delete(mono_phi_score, i, axis=0)
            dim_h = mono_phi_score[i][:-1]
            value_h, alpha = train_predict_regression(mono_1, dim_h, tal)
            err.append((value_h - mono_phi_score[i][-1])**2)
        err = numpy.mean(err)

        errs.append(err)
        tals.append(tal)
        print('regression tal:', tal, 'err', err)

    idx = numpy.argmin(errs)

    return tals[idx]
Project: audio_scripts    Author: audiofilter
def find_min_phase(sdata,a,f,sr,phase):
    rms1 = 0
    rms2 = 0
    rms3 = 0
    samples = len(sdata)
    for i in range(samples):
        diff1 = (sdata[i] - a*cos(2*pi*i*f/sr + phase[0]))
        rms1 += diff1*diff1
        diff2 = (sdata[i] - a*cos(2*pi*i*f/sr + phase[1]))
        rms2 += diff2*diff2
        diff3 = (sdata[i] - a*cos(2*pi*i*f/sr + phase[2]))
        rms3 += diff3*diff3
    rms = numpy.zeros(3)
    rms[0] = rms1
    rms[1] = rms2
    rms[2] = rms3
    i = numpy.argmin(rms)
    p = phase[i]
    return i,p
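The same three-phase search can be written without the Python loop by broadcasting the model over the candidate phases; a sketch under the same assumptions (sampled cosine model, arbitrary-length phase list):

import numpy as np

def find_min_phase_vec(sdata, a, f, sr, phase):
    n = np.arange(len(sdata))
    phase = np.asarray(phase)
    # model[i, j] = a * cos(2*pi*j*f/sr + phase[i]); residuals broadcast over phases
    model = a * np.cos(2 * np.pi * n[None, :] * f / sr + phase[:, None])
    rms = np.sum((np.asarray(sdata)[None, :] - model) ** 2, axis=1)
    i = np.argmin(rms)
    return i, phase[i]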
Project: kmeans-service    Author: MAYHEM-Lab
def _center_mahalanobis(self, data):
        """
        Finds a point that is in the center of the data using Mahalanobis distance.

        Parameters
        ----------
        data: input data as numpy array

        Returns
        -------
        mean: numpy array
        """
        distances = cdist(data, data, metric='mahalanobis', VI=self._inv_covar_matrices)
        sum_distances = np.sum(distances, axis=0)
        center_idx = np.argmin(sum_distances)
        return data[center_idx]
Project: Parallel.GAMIT    Author: demiangomez
def find_stable_a(self, A, cond_h, cond_s):

        # build the different combinations of
        # condition equations
        condeq = []
        if cond_h.size > 0:
            condeq.append(cond_h)
        condeq.append(cond_s)
        if cond_h.size > 0:
            condeq.append(numpy.row_stack((cond_s,cond_h)))

        condnum = []
        condnum.append(numpy.linalg.cond(A))

        for cond in condeq:
            condnum.append(numpy.linalg.cond(numpy.row_stack((A,cond))))

        i = numpy.argmin(numpy.array(condnum))

        if i == 0:
            return numpy.array([])
        else:
            return condeq[i-1]
Project: pysheds    Author: mdbartos
def nearest_cell(self, x, y, bbox=None, shape=None):
        """
        Returns the index of the cell (column, row) closest
        to a given geographical coordinate.

        Parameters
        ----------
        x : int or float
            x coordinate.
        y : int or float
            y coordinate.
        """

        if not bbox:
            bbox = self._bbox
        if not shape:
            shape = self.shape
        # Note: this speedup assumes grid cells are square
        y_ix, x_ix = self.bbox_indices(bbox, shape)
        y_ix += self.cellsize / 2.0
        x_ix += self.cellsize / 2.0
        desired_y = np.argmin(np.abs(y_ix - y))
        desired_x = np.argmin(np.abs(x_ix - x))
        return desired_x, desired_y
Project: keras-molecules    Author: maxhodak
def autoencoder(args, model):
    latent_dim = args.latent_dim

    structures = read_smiles_data(args.data)

    datobj = SmilesDataGenerator(structures, 120)
    train_gen = datobj.generator(1)

    if os.path.isfile(args.model):
        model.load(datobj.chars, args.model, latent_rep_size = latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)

    true_pred_gen = (((mat, weight, model.autoencoder.predict(mat))
                      for (mat, _, weight) in train_gen))
    text_gen = ((str.join('\n',
                          [str((datobj.table.decode(true_mat[vec_ix])[:np.argmin(weight[vec_ix])],
                                datobj.table.decode(vec)[:]))
                           for (vec_ix, vec) in enumerate(pred_mat)]))
                for (true_mat, weight, pred_mat) in true_pred_gen)
    for _ in range(args.sample):
        print(next(text_gen))
Project: radar    Author: amoose136
def test_output_shape(self):
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)

        out = np.ones(10, dtype=np.int_)
        a.argmin(-1, out=out)
        assert_equal(out, a.argmin(-1))
Project: amset    Author: hackingmaterials
def get_closest_k(kpoint, ref_ks, return_diff=False):
    """
    returns the list of difference between kpoints. If return_diff True, then
        for a given kpoint the minimum distance among distances with ref_ks is
        returned or just the reference kpoint that results if not return_diff
    Args:
        kpoint (1x3 array): the coordinates of the input k-point
        ref_ks ([1x3 array]): list of reference k-points from which the
            distance with initial_ks are calculated
        return_diff (bool): if True, the minimum distance is returned
    Returns (1x3 array):
    """
    min_dist_ik = np.array([norm(ki - kpoint) for ki in ref_ks]).argmin()
    if return_diff:
        return kpoint - ref_ks[min_dist_ik]
    else:
        return ref_ks[min_dist_ik]
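With ref_ks held as an (n, 3) array, the comprehension collapses to a single vectorized call; a minimal equivalent sketch:

import numpy as np
from numpy.linalg import norm

def get_closest_k_vec(kpoint, ref_ks, return_diff=False):
    ref_ks = np.asarray(ref_ks)
    i = norm(ref_ks - kpoint, axis=1).argmin()   # index of the nearest reference k-point
    return kpoint - ref_ks[i] if return_diff else ref_ks[i]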
Project: office-interoperability-tools    Author: milossramek
def mergeLocation(tx0, sp0, tx1, sp1):
    """
    find merge location in (tx0,sp0)
    """
    if len(tx0) < 2: 
        return 9999
    txmin=min( np.array(tx0)[:,1] ) # minimal line height, used as detection threshold
    txx=[]
    for i in range(min(len(tx0)-1,len(tx1))):
        tx=tx0[i][1] + sp0[i][1] + tx0[i+1][1]
        txx.append(tx - tx1[i][1])
    cc = np.argmin(txx)
    if txx[cc] < txmin/3: #expected to be near 0
        return cc
    else:
        return 9999
Project: MIT-Thesis    Author: alec-heif
def _fit(self, dataset):
        est = self.getOrDefault(self.estimator)
        epm = self.getOrDefault(self.estimatorParamMaps)
        numModels = len(epm)
        eva = self.getOrDefault(self.evaluator)
        tRatio = self.getOrDefault(self.trainRatio)
        seed = self.getOrDefault(self.seed)
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))
        metrics = [0.0] * numModels
        condition = (df[randCol] >= tRatio)
        validation = df.filter(condition)
        train = df.filter(~condition)
        for j in range(numModels):
            model = est.fit(train, epm[j])
            metric = eva.evaluate(model.transform(validation, epm[j]))
            metrics[j] += metric
        if eva.isLargerBetter():
            bestIndex = np.argmax(metrics)
        else:
            bestIndex = np.argmin(metrics)
        bestModel = est.fit(dataset, epm[bestIndex])
        return self._copyValues(TrainValidationSplitModel(bestModel, metrics))
Project: piwall-cvtools    Author: infinnovation
def contour_to_monitor_coords(screenCnt):
    '''Apply pyimagesearch algorithm to identify tl,tr,br,bl points from a contour'''
    # now that we have our screen contour, we need to determine
    # the top-left, top-right, bottom-right, and bottom-left
    # points so that we can later warp the image -- we'll start
    # by reshaping our contour to be our final four points and initializing
    # our output rectangle in top-left, top-right, bottom-right,
    # and bottom-left order
    pts = screenCnt.reshape(4, 2)
    rect = np.zeros((4, 2), dtype = "float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    return rect
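A quick check of the ordering with a hypothetical scrambled quadrilateral (OpenCV-style (4, 1, 2) contour):

import numpy as np

screenCnt = np.array([[[90, 10]], [[10, 10]], [[10, 80]], [[95, 85]]])
rect = contour_to_monitor_coords(screenCnt)
# rect[0] -> (10, 10)  top-left (smallest x+y)
# rect[1] -> (90, 10)  top-right (smallest y-x)
# rect[2] -> (95, 85)  bottom-right (largest x+y)
# rect[3] -> (10, 80)  bottom-left (largest y-x)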
Project: tomato    Author: sertansenturk
def _get_relative_note_occurences(note_models, pitch_distribution):
        max_rel_occur = 0
        for note_symbol, note in iteritems(note_models):
            try:
                # get the relative occurrence of each note from the pitch
                # distribution
                dists = np.array([abs(note['stable_pitch']['value'] - dist_bin)
                                  for dist_bin in pitch_distribution.bins])
            except TypeError:
                logging.info(u'The stable pitch for {0:s} is not computed'
                             .format(note_symbol))
                # use the max peak even if it's weak, far from theoretical etc.
                peak_idx, heights = note['distribution'].detect_peaks()
                max_peak_ind = peak_idx[np.argmax(heights)]
                max_bin = note['distribution'].bins[max_peak_ind]
                dists = np.array([abs(max_bin - dist_bin)
                                  for dist_bin in pitch_distribution.bins])

            peak_ind = np.argmin(dists)
            note['rel_occur'] = pitch_distribution.vals[peak_ind]
            max_rel_occur = max([max_rel_occur, note['rel_occur']])

        return max_rel_occur
Project: HARK    Author: econ-ark
def _evalAndDer(self,x):
        '''
        Returns the level and first derivative of the function at each value in
        x.  Only called internally by HARKinterpolator1D.eval_and_der.
        '''
        m = len(x)
        fx = np.zeros((m,self.funcCount))
        for j in range(self.funcCount):
            fx[:,j] = self.functions[j](x)
        fx[np.isnan(fx)] = np.inf
        i = np.argmin(fx,axis=1)
        y = fx[np.arange(m),i]
        dydx = np.zeros_like(y)
        for j in range(self.funcCount):
            c = i == j
            dydx[c] = self.functions[j].derivative(x[c])
        return y,dydx
Project: HARK    Author: econ-ark
def _derX(self,x,y):
        '''
        Returns the first derivative of the function with respect to X at each
        value in (x,y).  Only called internally by HARKinterpolator2D._derX.
        '''
        m = len(x)
        temp = np.zeros((m,self.funcCount))
        for j in range(self.funcCount):
            temp[:,j] = self.functions[j](x,y)
        temp[np.isnan(temp)] = np.inf
        i = np.argmin(temp,axis=1)
        dfdx = np.zeros_like(x)
        for j in range(self.funcCount):
            c = i == j
            dfdx[c] = self.functions[j].derivativeX(x[c],y[c])
        return dfdx
Project: HARK    Author: econ-ark
def _derY(self,x,y):
        '''
        Returns the first derivative of the function with respect to Y at each
        value in (x,y).  Only called internally by HARKinterpolator2D._derY.
        '''
        m = len(x)
        temp = np.zeros((m,self.funcCount))
        for j in range(self.funcCount):
            temp[:,j] = self.functions[j](x,y)
        temp[np.isnan(temp)] = np.inf
        i = np.argmin(temp,axis=1)
        y = temp[np.arange(m),i]
        dfdy = np.zeros_like(x)
        for j in range(self.funcCount):
            c = i == j
            dfdy[c] = self.functions[j].derivativeY(x[c],y[c])
        return dfdy
Project: HARK    Author: econ-ark
def _derX(self,x,y,z):
        '''
        Returns the first derivative of the function with respect to X at each
        value in (x,y,z).  Only called internally by HARKinterpolator3D._derX.
        '''
        m = len(x)
        temp = np.zeros((m,self.funcCount))
        for j in range(self.funcCount):
            temp[:,j] = self.functions[j](x,y,z)
        temp[np.isnan(temp)] = np.inf
        i = np.argmin(temp,axis=1)
        dfdx = np.zeros_like(x)
        for j in range(self.funcCount):
            c = i == j
            dfdx[c] = self.functions[j].derivativeX(x[c],y[c],z[c])
        return dfdx
Project: HARK    Author: econ-ark
def _derY(self,x,y,z):
        '''
        Returns the first derivative of the function with respect to Y at each
        value in (x,y,z).  Only called internally by HARKinterpolator3D._derY.
        '''
        m = len(x)
        temp = np.zeros((m,self.funcCount))
        for j in range(self.funcCount):
            temp[:,j] = self.functions[j](x,y,z)
        temp[np.isnan(temp)] = np.inf
        i = np.argmin(temp,axis=1)
        y = temp[np.arange(m),i]
        dfdy = np.zeros_like(x)
        for j in range(self.funcCount):
            c = i == j
            dfdy[c] = self.functions[j].derivativeY(x[c],y[c],z[c])
        return dfdy
Project: ChainConsumer    Author: Samreay
def test_summary_max_shortest_2(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.6827
        c.configure(statistics="max_shortest", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        x2 = interp1d(cdf, xs, bounds_error=False, fill_value=np.inf)(cdf + summary_area)
        dist = x2 - xs
        ind = np.argmin(dist)
        x0 = xs[ind]
        x2 = x2[ind]
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(x0, summary[0], atol=0.05)
        assert np.isclose(x2, summary[2], atol=0.05)
Project: ChainConsumer    Author: Samreay
def test_summary_max_shortest_3(self):
        c = ChainConsumer()
        c.add_chain(self.data_skew)
        summary_area = 0.95
        c.configure(statistics="max_shortest", bins=1.0, summary_area=summary_area)
        summary = c.analysis.get_summary()['0']

        xs = np.linspace(-1, 5, 1000)
        pdf = skewnorm.pdf(xs, 5, 1, 1.5)
        cdf = skewnorm.cdf(xs, 5, 1, 1.5)
        x2 = interp1d(cdf, xs, bounds_error=False, fill_value=np.inf)(cdf + summary_area)
        dist = x2 - xs
        ind = np.argmin(dist)
        x0 = xs[ind]
        x2 = x2[ind]
        xmax = xs[pdf.argmax()]

        assert np.isclose(xmax, summary[1], atol=0.05)
        assert np.isclose(x0, summary[0], atol=0.05)
        assert np.isclose(x2, summary[2], atol=0.05)
Project: Auspex    Author: BBN-Q
def find_closest(t, v, t0, v0):
    """ Find the closest point on the curve f = a + b/x
    to the given point (t,v)
    """
    a = v0
    b = v0*t0
    # Solve for intersection points
    eqn_coefs = [1/b, -t/b, 0, v-a, -b]
    tis = np.roots(eqn_coefs)
    tis = tis[abs(tis.imag/tis.real)<0.01].real # We care only real solutions
    tis = tis[tis>0] # and positive ones
    # Choose the shortest among solutions
    ds = abs(tis-t)*np.sqrt(1 + np.power(tis,4)/(b*b)) # Distance from solutions to given point (t,v)
    idx = np.argmin(ds)
    ti = tis[idx]
    vi = a + b/ti
    return ti, vi
Project: Auspex    Author: BBN-Q
def find_null_offset(xpts, powers, default=0.0):
    """Finds the offset corresponding to the minimum power using a fit to the measured data"""
    def model(x, a, b, c):
        return a*(x - b)**2 + c
    powers = np.power(10, powers/10.)
    min_idx = np.argmin(powers)
    try:
        fit = curve_fit(model, xpts, powers, p0=[1, xpts[min_idx], powers[min_idx]])
    except RuntimeError:
        logger.warning("Mixer null offset fit failed.")
        return default, xpts, np.zeros(len(powers))  # match the arity of the success path
    best_offset = np.real(fit[0][1])
    best_offset = np.minimum(best_offset, xpts[-1])
    best_offset = np.maximum(best_offset, xpts[0])
    xpts_fine = np.linspace(xpts[0],xpts[-1],101)
    fit_pts = np.array([np.real(model(x, *fit[0])) for x in xpts_fine])
    if min(fit_pts)<0: fit_pts-=min(fit_pts)-1e-10 #prevent log of a negative number
    return best_offset, xpts_fine, 10*np.log10(fit_pts)
Project: ngraph    Author: NervanaSystems
def test_reduce_argmin():
    def argmin(ndarray, axis, keepdims=False):
        res = np.argmin(ndarray, axis=axis)
        if keepdims:
            res = np.expand_dims(res, axis=axis)
        return res

    data = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)

    assert np.array_equal(import_and_compute('ArgMin', data, axis=0),
                          argmin(data, keepdims=True, axis=0))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=0, keepdims=0),
                          argmin(data, keepdims=False, axis=0))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=1),
                          argmin(data, keepdims=True, axis=1))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=1, keepdims=0),
                          argmin(data, keepdims=False, axis=1))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=2),
                          argmin(data, keepdims=True, axis=2))
    assert np.array_equal(import_and_compute('ArgMin', data, axis=2, keepdims=0),
                          argmin(data, keepdims=False, axis=2))
Project: vampyre    Author: GAMPTeam
def logistic_var():
    """
    Finds a variance to match probit and logistic regression.

    Finds a variance :math:`\\tau_w` such that,

        :math:`p=P(W < z) \\approx \\frac{1}{1+e^{-z}},`

    where :math:`W \\sim {\\mathcal N}(0,\\tau_w)`.
    """
    z = np.linspace(-5,5,1000)      # z points to test
    p1 = 1/(1+np.exp(-z))           # target probability
    var_test = np.linspace(2,3,1000)
    err = []
    for v in var_test:
        p2 = 0.5*(1+scipy.special.erf(z/np.sqrt(v*2)))
        err.append(np.mean((p1-p2)**2))

    i = np.argmin(err)
    wvar = var_test[i]
    return wvar
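Running the search should land close to the classic probit-logit scaling factor of 1.702, i.e. a variance of about 1.702**2 ≈ 2.90 (which is why var_test only scans [2, 3]):

wvar = logistic_var()
print(wvar)   # expected to be roughly 2.9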
Project: nanopores    Author: mitschabaude
def NLS_annealing(F, xi, yi, p, N=100, n=10, sigma=5.,factor=0.5):
    # N = size of population in one iteration
    # n = number of iterations
    # sigma = initial (multiplicative) standard deviation
    # factor = factor to reduce sigma per iteration
    print "initial", p
    p = np.atleast_1d(p)
    dim = len(p)
    # make initial sigma act like multiplication by sigma^(+-1)
    sigma = np.log(sigma)*np.ones(dim)

    for k in range(n):
        # create new population by adding multiplicative gaussian noise
        P = p[None, :] * np.exp(np.random.randn(N, dim) * sigma[None, :])
        # compute mean square loss on population
        f = np.mean((F(xi[None, :], P) - yi)**2, 1)
        # replace p by new best guess
        p = P[np.argmin(f), :]
        # update sigma
        sigma *= factor
        print "parameters:", p
    print "minimum", min(f)

    return tuple(p)
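A hypothetical usage sketch (model and data made up for illustration): F must accept xi with shape (1, n) and a population P with shape (N, dim), broadcasting to (N, n) predictions.

import numpy as np

def F(xi, P):
    a, b = P[:, 0:1], P[:, 1:2]    # (N, 1) columns of the population
    return a * np.exp(-b * xi)     # broadcasts to (N, n)

xi = np.linspace(0, 4, 50)
yi = 2.0 * np.exp(-1.3 * xi) + 0.01 * np.random.randn(50)
a_fit, b_fit = NLS_annealing(F, xi, yi, p=[1.0, 1.0])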
Project: DenseHumanBodyCorrespondences    Author: halimacc
def furthest_point_sample(vertices, faces, N, K):
    num_vertices = vertices.shape[0]
    center_indices = np.random.choice(num_vertices, N, replace=False)
    sqr_dists = 1e10 * np.ones(num_vertices)
    vertex_as = np.zeros(num_vertices, dtype=np.int32)
    for i in range(N):
        new_sqr_dists = np.sum(np.square(vertices - vertices[center_indices[i]]), 1)
        update_mask = new_sqr_dists < sqr_dists
        sqr_dists[update_mask] = new_sqr_dists[update_mask]
        vertex_as[update_mask] = i
        next_center = np.argmax(sqr_dists)
        if K - 1 <= i < N - 1:
            center_indices[i + 1] = next_center

    centers = vertices[center_indices]
    face_centers = np.mean(vertices[faces], 1)
    sqr_dists = sqr_dist(centers, face_centers)
    face_as = np.argmin(sqr_dists, 1)
    return center_indices, vertex_as, face_as
Project: rir-database    Author: Marvin182
def createLists(dbFilename):
    print('Splitting RIRs into sets...')
    sets = [
        RirSet('train', 0.8),
        RirSet('test', 0.1),
        RirSet('dev', 0.1),
    ]

    # open database
    rirDb = json.load(open(dbFilename))
    rirs = sorted(list(rirDb.keys()))

    # To distribute the RIRs across the sets we could shuffle, but since they are in
    # alphabetical order, simply iterating over them guarantees that the different
    # conditions are distributed (mostly) equally across the sets.
    sets[0].add(rirs[0])
    for i in range(1, len(rirs)):
        si = np.argmin([s.missing(i) for s in sets])
        sets[si].add(rirs[i])

    # save set files
    util.createDirectory(ListDir)
    for s in sets:
        s.save(ListDir)
Project: eemeter    Author: openeemeter
def lat_lng_to_usaf_station(lat, lng):
    """Return the closest USAF station ID using latitude and
    longitude coordinates.

    Parameters
    ----------
    lat : float
        Latitude coordinate.
    lng : float
        Longitude coordinate.

    Returns
    -------
    station : str, None
        String representing a USAF weather station ID or None, if none was
        found.
    """
    if lat is None or lng is None:
        return None
    usaf_station_to_lat_lng_index = _load_usaf_station_to_lat_lng_index()
    index_list = list(usaf_station_to_lat_lng_index.items())
    dists = [haversine(lat, lng, stat_lat, stat_lng)
             for _, (stat_lat, stat_lng) in index_list]
    return index_list[np.argmin(dists)][0]
Project: eemeter    Author: openeemeter
def lat_lng_to_tmy3_station(lat, lng):
    """Return the closest TMY3 station ID using latitude and
    longitude coordinates.

    Parameters
    ----------
    lat : float
        Latitude coordinate.
    lng : float
        Longitude coordinate.

    Returns
    -------
    station : str, None
        String representing a TMY3 weather station ID or None, if none was
        found.
    """
    if lat is None or lng is None:
        return None
    tmy3_station_to_lat_lng_index = _load_tmy3_station_to_lat_lng_index()
    index_list = list(tmy3_station_to_lat_lng_index.items())
    dists = [haversine(lat, lng, stat_lat, stat_lng)
             for _, (stat_lat, stat_lng) in index_list]
    return index_list[np.argmin(dists)][0]
Project: eemeter    Author: openeemeter
def lat_lng_to_zipcode(lat, lng):
    """Return the closest ZIP code using latitude and
    longitude coordinates.

    Parameters
    ----------
    lat : float
        Latitude coordinate.
    lng : float
        Longitude coordinate.

    Returns
    -------
    zipcode : str, None
        String representing a USPS ZIP code, or None, if none was found.

    """

    if lat is None or lng is None:
        return None
    zipcode_to_lat_lng_index = _load_zipcode_to_lat_lng_index()
    index_list = list(zipcode_to_lat_lng_index.items())
    dists = [haversine(lat, lng, zip_lat, zip_lng)
             for _, (zip_lat, zip_lng) in index_list]
    return index_list[np.argmin(dists)][0]
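All three lookups above rely on a haversine(lat, lng, stat_lat, stat_lng) helper; a minimal great-circle sketch consistent with that call (distances in kilometers; eemeter's actual helper may differ):

import numpy as np

def haversine(lat1, lng1, lat2, lng2):
    # great-circle distance between two (lat, lng) points, in kilometers
    lat1, lng1, lat2, lng2 = map(np.radians, [lat1, lng1, lat2, lng2])
    dlat, dlng = lat2 - lat1, lng2 - lng1
    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlng / 2) ** 2
    return 2 * 6371.0 * np.arcsin(np.sqrt(a))   # mean Earth radius ~6371 km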