Python numpy module: amax() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.amax().
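Before the project examples, here is a minimal self-contained primer on the numpy.amax behaviour they all rely on: reduction over the whole array or along a chosen axis, shape retention via keepdims, and NaN propagation (np.nanmax ignores NaNs instead).

import numpy as np

a = np.arange(12).reshape(3, 4)
np.amax(a)                          # 11, maximum of the flattened array
np.amax(a, axis=0)                  # array([ 8,  9, 10, 11]), column-wise maxima
np.amax(a, axis=1, keepdims=True)   # shape (3, 1), row maxima with dims kept
np.amax(np.array([1.0, np.nan]))    # nan -- amax propagates NaN
np.nanmax(np.array([1.0, np.nan]))  # 1.0 -- nanmax ignores it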

Project: pyku | Author: dubvulture
def min_side(_, pos):
    """
    Given an object pixels' positions, return the minimum side length of its
    bounding box
    :param _: pixel values (unused)
    :param pos: pixel position (1-D)
    :return: minimum bounding box side length
    """
    xs = np.array([i // SSIZE for i in pos])
    ys = np.array([i % SSIZE for i in pos])
    minx = np.amin(xs)
    miny = np.amin(ys)
    maxx = np.amax(xs)
    maxy = np.amax(ys)
    ct1 = compute_line(np.array([minx, miny]), np.array([minx, maxy]))
    ct2 = compute_line(np.array([minx, miny]), np.array([maxx, miny]))
    return min(ct1, ct2)
Project: spyking-circus | Author: spyking-circus
def view_trigger_snippets_bis(trigger_snippets, elec_index, save=None):
    fig = pylab.figure()
    ax = fig.add_subplot(1, 1, 1)
    for n in range(0, trigger_snippets.shape[2]):
        y = trigger_snippets[:, elec_index, n]
        x = numpy.arange(- (y.size - 1) / 2, (y.size - 1) / 2 + 1)
        b = 0.5 + 0.5 * numpy.random.rand()
        ax.plot(x, y, color=(0.0, 0.0, b), linestyle='solid')
    ax.grid(True)
    ax.set_xlim([numpy.amin(x), numpy.amax(x)])
    ax.set_xlabel("time")
    ax.set_ylabel("amplitude")
    if save is None:
        pylab.show()
    else:
        pylab.savefig(save)
        pylab.close(fig)
    return
Project: aapm_thoracic_challenge | Author: xf4j
def get_labels(contours, shape, slices):
    z = [np.around(s.ImagePositionPatient[2], 1) for s in slices]
    pos_r = slices[0].ImagePositionPatient[1]
    spacing_r = slices[0].PixelSpacing[1]
    pos_c = slices[0].ImagePositionPatient[0]
    spacing_c = slices[0].PixelSpacing[0]

    label_map = np.zeros(shape, dtype=np.float32)
    for con in contours:
        num = ROI_ORDER.index(con['name']) + 1
        for c in con['contours']:
            nodes = np.array(c).reshape((-1, 3))
            assert np.amax(np.abs(np.diff(nodes[:, 2]))) == 0
            z_index = z.index(np.around(nodes[0, 2], 1))
            r = (nodes[:, 1] - pos_r) / spacing_r
            c = (nodes[:, 0] - pos_c) / spacing_c
            rr, cc = polygon(r, c)
            label_map[z_index, rr, cc] = num

    return label_map
Project: lung-cancer-detector | Author: YichenGong
def to_rgb(img):
    """
    Converts the given array into an RGB image. If the number of channels is not
    3 the array is tiled such that it has 3 channels. Finally, the values are
    rescaled to [0,255) 

    :param img: the array to convert [nx, ny, channels]

    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)

    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img
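A caveat with this helper: on a constant image np.amax(img) is 0 after the minimum is subtracted, so the division yields NaNs, and the in-place operations fail on integer inputs. A defensive variant (an illustrative sketch, not part of the project):

def to_rgb_safe(img):
    img = np.atleast_3d(img).astype(np.float32)  # cast first so in-place ops are safe
    if img.shape[2] < 3:
        img = np.tile(img, 3)
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    rng = np.amax(img)
    if rng > 0:                                  # guard against division by zero
        img /= rng
    return img * 255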
Project: nn4nlp-code | Author: neubig
def display_data(word_vectors, words, target_words=None):
  target_matrix = word_vectors.copy()
  if target_words:
    target_words = [line.strip().lower() for line in open(target_words)][:2000]
    rows = [words.index(word) for word in target_words if word in words]
    target_matrix = target_matrix[rows,:]
  else:
    rows = np.random.choice(len(word_vectors), size=1000, replace=False)
    target_matrix = target_matrix[rows,:]
  reduced_matrix = tsne(target_matrix, 2)

  Plot.figure(figsize=(200, 200), dpi=100)
  max_x = np.amax(reduced_matrix, axis=0)[0]
  max_y = np.amax(reduced_matrix, axis=0)[1]
  Plot.xlim((-max_x,max_x))
  Plot.ylim((-max_y,max_y))

  Plot.scatter(reduced_matrix[:, 0], reduced_matrix[:, 1], 20)

  for row_id in range(0, len(rows)):
      target_word = words[rows[row_id]]
      x = reduced_matrix[row_id, 0]
      y = reduced_matrix[row_id, 1]
      Plot.annotate(target_word, (x,y))
  Plot.savefig("word_vectors.png");
Project: untwist | Author: IoSR-Surrey
def add(self, x, y = None):
        self.X =  np.memmap(
            self.path+"/X.npy", self.X.dtype,
            shape = (self.nrows + x.shape[0] , x.shape[1])
        )
        self.X[self.nrows:self.nrows + x.shape[0],:] = x

        if y is not None:
            if x.shape != y.shape:
                raise ValueError("x and y should have the same shape")
            self.Y = np.memmap(
                self.path+"/Y.npy", self.Y.dtype,
                shape = (self.nrows + y.shape[0] , y.shape[1])
            )
            self.Y[self.nrows:self.nrows + y.shape[0],:] = y

        delta = x - self.running_mean
        n = self.X.shape[0] + np.arange(x.shape[0]) + 1
        self.running_dev += np.sum(delta * (x - self.running_mean), 0)
        self.running_mean += np.sum(delta / n[:, np.newaxis], 0)
        self.running_max  = np.amax(np.vstack((self.running_max, x)), 0)
        self.running_min  = np.amin(np.vstack((self.running_min, x)), 0)
        self.nrows += x.shape[0]
Project: esys-pbi | Author: fsxfreak
def _make_grid(dim=(11,4)):
    """
    this function generates the structure for an asymmetrical circle grid
    domain (0-1)
    """
    x,y = range(dim[0]),range(dim[1])
    p = np.array([[[s,i] for s in x] for i in y], dtype=np.float32)
    p[:,1::2,1] += 0.5
    p = np.reshape(p, (-1,2), 'F')

    # scale height = 1
    x_scale =  1./(np.amax(p[:,0])-np.amin(p[:,0]))
    y_scale =  1./(np.amax(p[:,1])-np.amin(p[:,1]))

    p *= x_scale, x_scale / .5

    return p
Project: skutil | Author: tgsmith61591
def _compute_stats(self, pred, expo, loss, prem):
        n_samples, n_groups = pred.shape[0], self.n_groups
        pred_ser = pd.Series(pred)
        loss_to_returns = np.sum(loss) / np.sum(prem)

        rank = pd.qcut(pred_ser, n_groups, labels=False)
        n_groups = np.amax(rank) + 1
        groups = np.arange(n_groups)  # if we ever go back to using n_groups...

        tab = pd.DataFrame({
            'rank': rank,
            'pred': pred,
            'prem': prem,
            'loss': loss,
            'expo': expo
        })

        grouped = tab[['rank', 'pred', 'prem', 'loss', 'expo']].groupby('rank')
        agg_rlr = (grouped['loss'].agg(np.sum) / grouped['prem'].agg(np.sum)) / loss_to_returns

        return tab, agg_rlr, n_groups
Project: CartPole-v0 | Author: hmtai6
def reply(self):        
        batch = self.memory.sample(nbReplay)

        states = np.array([ o[0] for o in batch ])
        states_ = np.array([ (nbReplay if o[3] is None else o[3]) for o in batch ])

        p = agent.brain.predict(states)
        p_ = agent.brain.predict(states_)

        x = np.zeros((nbReplay, self.stateCnt))
        y = np.zeros((nbReplay, self.actionCnt))     

        for i in range(nbReplay):
            o = batch[i]
            s, a, r, s_ = o[0], o[1], o[2], o[3]

            t = p[i]
            if s_ is None:
                t[a] = r
            else:
                t[a] = r + td_discount_rate * numpy.amax(p_[i])

            x[i] = s
            y[i] = t
Project: nimo | Author: wolfram2012
def __call__(self, points, data):
        _,c = data.shape

        dist_mat = []
        if self.p == np.inf:
            for pt in points:
                pt = pt.reshape(1,c)
                row =  np.amax(np.abs(data - pt),axis=-1)
                dist_mat.append(row)
        else:
            for pt in points:
                pt = pt.reshape(1,c)
                row = np.sum(np.abs(data - pt)**self.p,axis=1)**(1.0/self.p)
                dist_mat.append(row)

        return np.array(dist_mat)
Project: nimo | Author: wolfram2012
def __call__(self, points, data):
        _,c = data.shape

        dist_mat = []
        if self.p == np.inf:
            for pt in points:
                pt = pt.reshape(1,c)
                raw_dist = self.scale*(data - pt)
                robust_dist = raw_dist/np.sqrt(1+raw_dist**2) # sigmoid - locally linear
                row =  np.amax(np.abs(robust_dist),axis=-1)
                dist_mat.append(row)
        else:
            for pt in points:
                pt = pt.reshape(1,c)
                raw_dist = self.scale*(data - pt)
                robust_dist = raw_dist/np.sqrt(1+raw_dist**2) # sigmoid - locally linear
                row = np.sum(np.abs(robust_dist)**self.p,axis=1)**(1.0/self.p)
                dist_mat.append(row)

        return np.array(dist_mat)
Project: uncover-ml | Author: GeoscienceAustralia
def compute_n_classes(classes, config):
    """The number of cluster centres to use for K-means

    Just handles the case where someone specifies k=5 but labels 10 classes
    in the training data. This will return k=10.

    Parameters
    ----------
    classes : ndarray
        an array of hard class assignments given as training data
    config : Config
        The app config class holding the number of classes asked for

    Returns
    -------
    k : int > 0
        The max of k and the number of classes referenced in the training data
    """
    k = mpiops.comm.allreduce(np.amax(classes), op=mpiops.MPI.MAX)
    k = int(max(k, config.n_classes))
    return k
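Outside an MPI context the rule reduces to a plain maximum over the labels; a standalone sketch (mpiops is specific to uncover-ml, so a single process is assumed here):

classes = np.array([0, 3, 9, 9, 2])                   # hard class labels in the training data
n_classes_requested = 5
k = int(max(np.amax(classes), n_classes_requested))   # k = 9, not 5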
Project: aboleth | Author: data61
def input_fn(df):
    """Format the downloaded data."""
    # Creates a dictionary mapping from each continuous feature column name (k)
    # to the values of that column stored in a constant Tensor.
    continuous_cols = [df[k].values for k in CONTINUOUS_COLUMNS]
    X_con = np.stack(continuous_cols).astype(np.float32).T

    # Standardise
    X_con -= X_con.mean(axis=0)
    X_con /= X_con.std(axis=0)

    # Creates a dictionary mapping from each categorical feature column name
    categ_cols = [np.where(pd.get_dummies(df[k]).values)[1][:, np.newaxis]
                  for k in CATEGORICAL_COLUMNS]
    n_values = [np.amax(c) + 1 for c in categ_cols]
    X_cat = np.concatenate(categ_cols, axis=1).astype(np.int32)

    # Converts the label column into a constant Tensor.
    label = df[LABEL_COLUMN].values[:, np.newaxis]

    # Returns the feature columns and the label.
    return X_con, X_cat, n_values, label
Project: fexum | Author: KDD-OpenSource
def calculate_feature_statistics(feature_id):
    feature = Feature.objects.get(pk=feature_id)

    dataframe = _get_dataframe(feature.dataset.id)
    feature_col = dataframe[feature.name]

    feature.min = np.amin(feature_col).item()
    feature.max = np.amax(feature_col).item()
    feature.mean = np.mean(feature_col).item()
    feature.variance = np.nanvar(feature_col).item()
    unique_values = np.unique(feature_col)
    integer_check = (np.mod(unique_values, 1) == 0).all()
    feature.is_categorical = integer_check and (unique_values.size < 10)
    if feature.is_categorical:
        feature.categories = list(unique_values)
    feature.save(update_fields=['min', 'max', 'variance', 'mean', 'is_categorical', 'categories'])

    del unique_values, feature
Project: pyglmnet | Author: glm-tools
def softmax(w):
    """Softmax function of given array of number w.

    Parameters
    ----------
    w: array | list
        The array of numbers.

    Returns
    -------
    dist: array
        The resulting array with values ranging from 0 to 1.
    """
    w = np.array(w)
    maxes = np.amax(w, axis=1)
    maxes = maxes.reshape(maxes.shape[0], 1)
    e = np.exp(w - maxes)
    dist = e / np.sum(e, axis=1, keepdims=True)
    return dist
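Subtracting the row-wise np.amax is the standard overflow guard: exponentiating a large logit overflows, while shifting by the maximum leaves the result mathematically unchanged. A quick check with the softmax above:

w = np.array([[1000.0, 1001.0]])
softmax(w)                       # array([[0.26894142, 0.73105858]])
np.exp(w) / np.exp(w).sum()      # nan: exp(1000) overflows without the shift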
Project: fdsgeogen | Author: FireDynamics
def decompose(n, p):
    p_primes = primes(p)[::-1]
    procs = np.array([1,1,1])
    n_tmp = np.copy(n)

    for fac in p_primes:
        while (np.any(n_tmp > 0)):
            cpmax = np.argmax(n_tmp)
            cmax  = np.amax(n_tmp)
            if (cmax % fac == 0):
                n_tmp[cpmax] //= fac
                procs[cpmax] *= fac
                break
            else:
                n_tmp[cpmax] = -n_tmp[cpmax]

        if np.all(n_tmp < 0):
            print "!! decomposition does not work out... ", fac, n_tmp
            sys.exit()

        n_tmp = np.abs(n_tmp)

    print " - decomposition: resulting proc decomposition ", procs, " local mesh size ", n_tmp
    return procs[0], procs[1], procs[2]
Project: specGAN | Author: OSU-slatelab
def find_min_max(scp_file):
    minimum = float("inf")
    maximum = -float("inf")
    uid = 0
    offset = 0
    ark_dict, uid = read_mats(uid, offset, scp_file)
    while ark_dict:
        for key in ark_dict.keys():
            mat_max = np.amax(ark_dict[key])
            mat_min = np.amin(ark_dict[key])
            if mat_max > maximum:
                maximum = mat_max
            if mat_min < minimum:
                minimum = mat_min
        ark_dict, uid = read_mats(uid, offset, scp_file)
    print("min:", minimum, "max:", maximum)
    return minimum, maximum
Project: SecuML | Author: ANSSI-FR
def displayDataset(self, dataset):
        eps = 0.00001
        linewidth = dataset.linewidth
        if np.var(dataset.values) < eps:
            linewidth += 2
            mean = np.mean(dataset.values)
            x = np.arange(0, 1, 0.1)
            x = np.sort(np.append(x, [mean, mean-eps, mean+eps]))
            density = [1 if v == mean else 0 for v in x]
        else:
            self.kde.fit(np.asarray([[x] for x in dataset.values]))
            ## Computes the x axis
            x_max = np.amax(dataset.values)
            x_min = np.amin(dataset.values)
            delta = x_max - x_min
            density_delta = 1.1 * delta
            x = np.arange(x_min, x_max, density_delta / self.num_points)
            x_density = [[y] for y in x]
            ## kde.score_samples returns the 'log' of the density
            log_density = self.kde.score_samples(x_density).tolist()
            density = [math.exp(v) for v in log_density]
        self.ax.plot(x, density, label = dataset.label, color = dataset.color,
                linewidth = linewidth, linestyle = dataset.linestyle)
Project: SecuML | Author: ANSSI-FR
def display(self, output_filename):
        fig, (ax) = plt.subplots(1, 1)
        data   = [d.values for d in self.datasets]
        labels = [d.label for d in self.datasets]
        bp = ax.boxplot(data, labels=labels, notch=False, sym='+', vert=True, whis=1.5)
        plt.setp(bp['boxes'], color='black')
        plt.setp(bp['whiskers'], color='black')
        plt.setp(bp['fliers'], color='black', marker='+')
        for i in range(len(self.datasets)):
            box = bp['boxes'][i]
            box_x = []
            box_y = []
            for j in range(5):
                box_x.append(box.get_xdata()[j])
                box_y.append(box.get_ydata()[j])
            box_coords = list(zip(box_x, box_y))
            box_polygon = Polygon(box_coords, facecolor = self.datasets[i].color)
            ax.add_patch(box_polygon)
        if self.title is not None:
            ax.set_title(self.title)
        x_min = np.amin([np.amin(d.values) for d in self.datasets])
        x_max = np.amax([np.amax(d.values) for d in self.datasets])
        ax.set_ylim(x_min - 0.05*(x_max - x_min), x_max + 0.05*(x_max - x_min))
        fig.savefig(output_filename)
        plt.close(fig)
Project: bgsCNN | Author: SaoYan
def build_img_pair(img_batch):
    input_cast = img_batch[:,:,:,0:6].astype(dtype = np.float32)
    input_min = np.amin(input_cast, axis=(1,2,3))
    input_max = np.amax(input_cast, axis=(1,2,3))
    for i in range(3):
        input_min = np.expand_dims(input_min, i+1)
        input_max = np.expand_dims(input_max, i+1)
    input_norm = (input_cast - input_min) / (input_max - input_min)
    gt_cast = img_batch[:,:,:,6].astype(dtype = np.float32)
    gt_cast = np.expand_dims(gt_cast, 3)
    gt_min = np.amin(gt_cast, axis=(1,2,3))
    gt_max = np.amax(gt_cast, axis=(1,2,3))
    for i in range(3):
        gt_min = np.expand_dims(gt_min, i+1)
        gt_max = np.expand_dims(gt_max, i+1)
    gt_norm = (gt_cast - gt_min) / (gt_max - gt_min)
    return input_norm, gt_norm
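The expand_dims loops above only restore the axes that the reduction removed; np.amin and np.amax accept keepdims=True and do this in one step. A behaviour-equivalent sketch for the input half (same 4-D batch layout assumed):

input_min = np.amin(input_cast, axis=(1, 2, 3), keepdims=True)   # shape (N, 1, 1, 1)
input_max = np.amax(input_cast, axis=(1, 2, 3), keepdims=True)
input_norm = (input_cast - input_min) / (input_max - input_min)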
Project: sequence-based-recommendations | Author: rdevooght
def load_last(self, save_dir):
        '''Load last model from dir
        '''
        def extract_number_of_epochs(filename):
            m = re.search(r'_ne([0-9]+(\.[0-9]+)?)_', filename)
            return float(m.group(1))

        # Get all the models for this RNN
        file = save_dir + self._get_model_filename("*")
        file = np.array(glob.glob(file))

        if len(file) == 0:
            print('No previous model, starting from scratch')
            return 0

        # Find last model and load it
        last_batch = np.amax(np.array([extract_number_of_epochs(f) for f in file]))
        last_model = save_dir + self._get_model_filename(last_batch)
        print('Starting from model ' + last_model)
        self.load(last_model)

        return last_batch
Project: Deep-learning-Colorization-for-visual-media | Author: OmarSayedMostafa
def Get_Batch_Chrominance():
    '''Convert every image in the batch to LAB colorspace and normalize each value of it to [0,1].

    Return:
        AbColores_values array [batch_size,224,224,2], 0 -> A value, 1 -> B value
    '''
    global AbColores_values
    global ColorImages_Batch
    AbColores_values = np.empty((Batch_size,224,224,2),"float32")
    for indx in range(Batch_size):
        lab = color.rgb2lab(ColorImages_Batch[indx])
        Min_valueA = np.amin(lab[:,:,1])
        Max_valueA = np.amax(lab[:,:,1])
        Min_valueB = np.amin(lab[:,:,2])
        Max_valueB = np.amax(lab[:,:,2])
        AbColores_values[indx,:,:,0] = Normalize(lab[:,:,1],-128,127)
        AbColores_values[indx,:,:,1] = Normalize(lab[:,:,2],-128,127)
Project: vec4ir | Author: lgalke
def _partial_fit(self, X, y=None):
        _checkXy(X, y)
        # update index
        self._inv_X = sp.vstack([self._inv_X, self._cv.transform(X)])
        # update source
        # self._fit_X = np.hstack([self._fit_X, np.asarray(X)])
        # try to infer viable doc ids
        if y is None:
            next_id = np.amax(self._y) + 1
            y = np.arange(next_id, next_id + len(X))
        else:
            y = np.asarray(y)
        self._y = np.hstack([self._y, y])

        self.n_docs += len(X)
        return self
Project: tensorflow-reinforce | Author: yukezhu
def sampleAction(self, states):
    # TODO: use this code piece when tf.multinomial gets better
    # sample action from current policy
    # actions = self.session.run(self.predicted_actions, {self.states: states})[0]
    # return actions[0]

    # temporary workaround
    def softmax(y):
      """ simple helper function here that takes unnormalized logprobs """
      maxy = np.amax(y)
      e = np.exp(y - maxy)
      return e / np.sum(e)

    # epsilon-greedy exploration strategy
    if random.random() < self.exploration:
      return random.randint(0, self.num_actions-1)
    else:
      action_scores = self.session.run(self.action_scores, {self.states: states})[0]
      action_probs  = softmax(action_scores) - 1e-5
      action = np.argmax(np.random.multinomial(1, action_probs))
      return action
Project: gym-kidney | Author: camoy
def pool_max(alpha):
    """
    Given a list of vectors alpha, return the entry-wise
    maximum.
    """
    return np.amax(alpha, axis=1)
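Because the reduction runs along axis 1, this assumes the vectors are stacked as columns: entry i of the result is the maximum of entry i across all vectors. For example:

alpha = np.column_stack(([1.0, 5.0], [3.0, 2.0]))   # two vectors as columns, shape (2, 2)
pool_max(alpha)                                      # array([3., 5.])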

# A DistrFun is one of:
# - p0_min, Dirac delta distribution at vertex of min degree
# - p0_max, Dirac delta distribution at vertex of max degree
# - p0_median, Dirac delta distribution at vertex of median degree
# - p0_mean, Dirac delta distribution at vertex of mean degree

#
# Walk2VecEmbedding embeds the graph according to a modified Walk2Vec
# random walk method.
# - p0s : [DistrFun], initial distributions
# - tau : Nat, steps in the random walk
# - alpha : (0, 1], jump probability
#
Project: strategy | Author: kanghua309
def replay(self):
        """Memory Management and training of the agent
        """
        if len(self.memory) < self.batch_size:
            return

        state, action, reward, next_state, done = self._get_batches()
        reward += (self.gamma
                   * np.logical_not(done)
                   * np.amax(self.model.predict(next_state), axis=1))
        q_target = self.target_model.predict(state)

        _ = pd.Series(action)
        one_hot = pd.get_dummies(_).to_numpy()  # .as_matrix() was removed in pandas 1.0
        action_batch = np.where(one_hot == 1)
        q_target[action_batch] = reward
        return self.model.fit(state, q_target,
                              batch_size=self.batch_size,
                              epochs=1,
                              verbose=False)
Project: Tensorflow-Softmax-NER-RNNLM | Author: queue-han
def test_softmax_basic():
  """
  Some simple tests to get you started. 
  Warning: these are not exhaustive.
  """
  print "Running basic tests..."
  test1 = softmax(tf.convert_to_tensor(
      np.array([[1001,1002],[3,4]]), dtype=tf.float32))
  with tf.Session():
      test1 = test1.eval()
  assert np.amax(np.fabs(test1 - np.array(
      [0.26894142,  0.73105858]))) <= 1e-6

  test2 = softmax(tf.convert_to_tensor(
      np.array([[-1001,-1002]]), dtype=tf.float32))
  with tf.Session():
      test2 = test2.eval()
  assert np.amax(np.fabs(test2 - np.array(
      [0.73105858, 0.26894142]))) <= 1e-6

  print "Basic (non-exhaustive) softmax tests pass\n"
Project: Tensorflow-Softmax-NER-RNNLM | Author: queue-han
def test_cross_entropy_loss_basic():
  """
  Some simple tests to get you started.
  Warning: these are not exhaustive.
  """
  y = np.array([[0, 1], [1, 0], [1, 0]])
  yhat = np.array([[.5, .5], [.5, .5], [.5, .5]])

  test1 = cross_entropy_loss(
      tf.convert_to_tensor(y, dtype=tf.int32),
      tf.convert_to_tensor(yhat, dtype=tf.float32))
  with tf.Session():
    test1 = test1.eval()
  result = -3 * np.log(.5)
  assert np.amax(np.fabs(test1 - result)) <= 1e-6
  print "Basic (non-exhaustive) cross-entropy tests pass\n"
Project: yt | Author: yt-project
def __init__(self, data, leafsize=10):
        """Construct a kd-tree.

        Parameters:
        ===========

        data : array-like, shape (n,k)
            The data points to be indexed. This array is not copied, and
            so modifying this data will result in bogus results.
        leafsize : positive integer
            The number of points at which the algorithm switches over to
            brute-force.
        """
        self.data = np.asarray(data)
        self.n, self.m = np.shape(self.data)
        self.leafsize = int(leafsize)
        if self.leafsize<1:
            raise ValueError("leafsize must be at least 1")
        self.maxes = np.amax(self.data,axis=0)
        self.mins = np.amin(self.data,axis=0)

        self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
Project: GY-91_and_PiCamera_RaspberryPi | Author: mikechan0731
def generate_dist_per_sec(self):
        time_end= int(np.amax(self.raw_data['time']))

        #===== acc =====
        # interpolate the x/y acceleration channels onto a 10 ms time grid
        ax_interp_10ms = self.acc_normalize(np.interp(np.arange(0.0,time_end,0.01), self.raw_data['time'], self.raw_data['ax']))
        ay_interp_10ms = self.acc_normalize(np.interp(np.arange(0.0,time_end,0.01), self.raw_data['time'], self.raw_data['ay']))
        rxy_interp_10ms = np.sqrt(ax_interp_10ms**2 + ay_interp_10ms**2)

        plt.plot(ax_interp_10ms, c='b')
        plt.plot(ay_interp_10ms, c='g')
        plt.plot(self.detrend_1d(rxy_interp_10ms, time_lst=np.arange(0.0,time_end,0.01)), c='k')

        plt.show()

        axy, vxy, sxy = self.another_integral(rxy_interp_10ms, time_lst= np.arange(0.0,time_end,0.01))
        return axy, vxy, sxy
Project: tf-openpose | Author: ildoonet
def get_heatmap(self, target_size):
        heatmap = np.zeros((CocoMetadata.__coco_parts, self.height, self.width))

        for joints in self.joint_list:
            for idx, point in enumerate(joints):
                if point[0] < 0 or point[1] < 0:
                    continue
                CocoMetadata.put_heatmap(heatmap, idx, point, self.sigma)

        heatmap = heatmap.transpose((1, 2, 0))

        # background
        heatmap[:, :, -1] = np.clip(1 - np.amax(heatmap, axis=2), 0.0, 1.0)

        if target_size:
            heatmap = cv2.resize(heatmap, target_size, interpolation=cv2.INTER_AREA)

        return heatmap
Project: pyku | Author: dubvulture
def diagonal(_, pos):
    """
    Given an object pixels' positions, return the diagonal length of its
    bounding box
    :param _: pixel values (unused)
    :param pos: pixel position (1-D)
    :return: diagonal of bounding box
    """
    xs = np.array([i // SSIZE for i in pos])
    ys = np.array([i % SSIZE for i in pos])
    minx = np.amin(xs)
    miny = np.amin(ys)
    maxx = np.amax(xs)
    maxy = np.amax(ys)
    return compute_line(np.array([minx, miny]), np.array([maxx, maxy]))
Project: AutoML5 | Author: djajetic
def binarization (array):
    ''' Takes a binary-class datafile and turn the max value (positive class) into 1 and the min into 0'''
    array = np.array(array, dtype=float) # conversion needed to use np.inf after
    if len(np.unique(array)) > 2:
        raise ValueError ("The argument must be a binary-class datafile. {} classes detected".format(len(np.unique(array))))

    # manipulation which aims to avoid errors in data with, for example, classes '1' and '2'.
    array[array == np.amax(array)] = np.inf
    array[array == np.amin(array)] = 0
    array[array == np.inf] = 1
    return np.array(array, dtype=int)
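The np.inf round-trip matters because writing 1 over the maximum entries directly could collide with data whose classes are literally 1 and 2. For example:

binarization([2, 1, 2, 2, 1])   # array([1, 0, 1, 1, 0])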
Project: almond-nnparser | Author: Stanford-Mobisocial-IoT-Lab
def test_all_close(name, actual, expected):
    if actual.shape != expected.shape:
        raise ValueError("{:} failed, expected output to have shape {:} but has shape {:}"
                         .format(name, expected.shape, actual.shape))
    if np.amax(np.fabs(actual - expected)) > 1e-6:
        raise ValueError("{:} failed, expected {:} but value is {:}".format(name, expected, actual))
    else:
        print(name, "passed!")
Project: composability_bench | Author: IntelPython
def bench_on(runner, sym, Ns, trials, dtype=None):
    global args, kernel, out, mkl_layer
    prepare = globals().get("prepare_"+sym, prepare_default)
    kernel  = globals().get("kernel_"+sym, None)
    if not kernel:
       kernel = getattr(np.linalg, sym)
    out_lvl = runner.__doc__.split('.')[0].strip()
    func_s  = kernel.__doc__.split('.')[0].strip()
    log.debug('Preparing input data for %s (%s).. ' % (sym, func_s))
    args = [prepare(int(i)) for i in Ns]
    it = range(len(Ns))
    # pprint(Ns)
    out = np.empty(shape=(len(Ns), trials))
    b = body(trials)
    tic, toc = (0, 0)
    log.debug('Warming up %s (%s).. ' % (sym, func_s))
    runner(range(1000), empty_work)
    kernel(*args[0])
    runner(range(1000), empty_work)
    log.debug('Benchmarking %s on %s: ' % (func_s, out_lvl))
    gc_old = gc.isenabled()
#    gc.disable()
    tic = time.time()
    runner(it, b)
    toc = time.time() - tic
    if gc_old:
        gc.enable()
    if 'reused_pool' in globals():
        del globals()['reused_pool']

    #calculate average time and min time and also keep track of outliers (max time in the loop)
    min_time = np.amin(out)
    max_time = np.amax(out)
    mean_time = np.mean(out)
    stdev_time = np.std(out)

    #print("Min = %.5f, Max = %.5f, Mean = %.5f, stdev = %.5f " % (min_time, max_time, mean_time, stdev_time))
    #final_times = [min_time, max_time, mean_time, stdev_time]

    print('## %s: Outer:%s, Inner:%s, Wall seconds:%f\n' % (sym, out_lvl, mkl_layer, float(toc)))
    return out
Project: spyking-circus | Author: spyking-circus
def view_trigger_snippets(trigger_snippets, chans, save=None):
    # Create output directory if necessary (the signature allows save=None).
    if save is not None:
        if os.path.exists(save):
            for f in os.listdir(save):
                p = os.path.join(save, f)
                os.remove(p)
            os.removedirs(save)
        os.makedirs(save)
    # Plot figures.
    fig = pylab.figure()
    for (c, chan) in enumerate(chans):
        ax = fig.add_subplot(1, 1, 1)
        for n in range(0, trigger_snippets.shape[2]):
            y = trigger_snippets[:, c, n]
            x = numpy.arange(- (y.size - 1) / 2, (y.size - 1) / 2 + 1)
            b = 0.5 + 0.5 * numpy.random.rand()
            ax.plot(x, y, color=(0.0, 0.0, b), linestyle='solid')
        y = numpy.mean(trigger_snippets[:, c, :], axis=1)
        x = numpy.arange(- (y.size - 1) / 2, (y.size - 1) / 2 + 1)
        ax.plot(x, y, color=(1.0, 0.0, 0.0), linestyle='solid')
        ax.grid(True)
        ax.set_xlim([numpy.amin(x), numpy.amax(x)])
        ax.set_title("Channel %d" %chan)
        ax.set_xlabel("time")
        ax.set_ylabel("amplitude")
        if save is not None:
            # Save plot.
            filename = "channel-%d.png" %chan
            path = os.path.join(save, filename)
            pylab.savefig(path)
        fig.clf()
    if save is None:
        pylab.show()
    else:
        pylab.close(fig)
    return
Project: kaggle_dsb2017 | Author: astoc
def process_scans(scans):  # used for testing
    scans1=np.zeros((scans.shape[0],1,img_rows,img_cols))
    for i in range(scans.shape[0]):
        img=scans[i,:,:]
        img = 255.0 / np.amax(img) * img
        img =img.astype(np.uint8)
        img =cv2.resize(img, (img_rows, img_cols))
        scans1[i,0,:,:]=img
    return (scans1)
Project: vehicle_brand_classification_CNN | Author: nanoc812
def imgSeg_logo(approx, himg, wimg):
    w = np.amax(approx[:, :, 0]) - np.amin(approx[:, :, 0])
    h = np.amax(approx[:, :, 1]) - np.amin(approx[:, :, 1])
    if float(w) / float(h + 0.001) > 4.5:
        h = int(float(w) / 3.5)
    w0 = np.amin(approx[:, :, 0])
    h0 = np.amin(approx[:, :, 1])
    h1 = h0 - int(3.5 * h)
    h2 = h0
    w1 = max(w0 + w // 2 - int(0.5 * (h2 - h1)), 0)
    w2 = min(w0 + w // 2 + int(0.5 * (h2 - h1)), wimg - 1)
    return h1, h2, w1, w2
Project: vehicle_brand_classification_CNN | Author: nanoc812
def imgSeg_rect(approx, himg, wimg):
    w = np.amax(approx[:, :, 0]) - np.amin(approx[:, :, 0])
    h = np.amax(approx[:, :, 1]) - np.amin(approx[:, :, 1])
    if float(w) / float(h + 0.001) > 4.5:
        h = int(float(w) / 3.5)
    w0 = np.amin(approx[:, :, 0])
    h0 = np.amin(approx[:, :, 1])
    h1 = h0 - int(3.6 * h)
    h2 = min(h0 + int(3 * h), himg - 1)
    w1 = max(w0 + w // 2 - (h2 - h1), 0)
    w2 = min(w0 + w // 2 + (h2 - h1), wimg - 1)
    return h1, h2, w1, w2
Project: ml | Author: hohoins
def rarmax(vector):
    m = np.amax(vector)
    indices = np.nonzero(vector == m)[0]
    return pr.choice(indices)
Project: pointnet | Author: charlesq34
def collect_bounding_box(anno_path, out_filename):
    """ Compute bounding boxes from each instance in original dataset files on
        one room. **We assume the bbox is aligned with XYZ coordinate.**

    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save instance bounding boxes for that room.
            each line is x1 y1 z1 x2 y2 z2 label,
            where (x1,y1,z1) is the point on the diagonal closer to origin
    Returns:
        None
    Note:
        room points are shifted, the most negative point is now at origin.
    """
    bbox_label_list = []

    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        label = g_class2label[cls]
        # Compute tightest axis aligned bounding box
        xyz_min = np.amin(points[:, 0:3], axis=0)
        xyz_max = np.amax(points[:, 0:3], axis=0)
        ins_bbox_label = np.expand_dims(
            np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)
        bbox_label_list.append(ins_bbox_label)

    bbox_label = np.concatenate(bbox_label_list, 0)
    room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)
    bbox_label[:, 0:3] -= room_xyz_min 
    bbox_label[:, 3:6] -= room_xyz_min 

    fout = open(out_filename, 'w')
    for i in range(bbox_label.shape[0]):
        fout.write('%f %f %f %f %f %f %d\n' % \
                      (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2],
                       bbox_label[i,3], bbox_label[i,4], bbox_label[i,5],
                       bbox_label[i,6]))
    fout.close()
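The tight axis-aligned box is simply the column-wise extrema of the point cloud:

pts = np.array([[0., 2., 1.],
                [3., 1., 5.]])
np.amin(pts, axis=0)   # array([0., 1., 1.])
np.amax(pts, axis=0)   # array([3., 2., 5.])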
Project: aapm_thoracic_challenge | Author: xf4j
def clean_contour(in_contour, is_prob=False):
    if is_prob:
        pred = (in_contour >= 0.5).astype(np.float32)
    else:
        pred = in_contour
    labels = measure.label(pred)
    area = []
    for l in range(1, np.amax(labels) + 1):
        area.append(np.sum(labels == l))
    out_contour = in_contour
    out_contour[np.logical_and(labels > 0, labels != np.argmax(area) + 1)] = 0
    return out_contour
Project: aapm_thoracic_challenge | Author: xf4j
def read_testing_inputs(file, roi, im_size, output_path=None):
    f_h5 = h5py.File(file, 'r')
    if roi == -1:
        images = np.asarray(f_h5['resized_images'], dtype=np.float32)
        read_info = {}
        read_info['shape'] = np.asarray(f_h5['images'], dtype=np.float32).shape
    else:
        images = np.asarray(f_h5['images'], dtype=np.float32)
        output = h5py.File(os.path.join(output_path, 'All_' + os.path.basename(file)), 'r')
        predictions = np.asarray(output['predictions'], dtype=np.float32)
        output.close()
        # Select the roi
        roi_labels = (predictions == roi + 1).astype(np.float32)
        nz = np.nonzero(roi_labels)
        extract = []
        for c in range(3):
            start = np.amin(nz[c])
            end = np.amax(nz[c])
            r = end - start
            extract.append((np.maximum(int(np.rint(start - r * 0.1)), 0),
                            np.minimum(int(np.rint(end + r * 0.1)), images.shape[c])))

        extract_images = images[extract[0][0] : extract[0][1], extract[1][0] : extract[1][1], extract[2][0] : extract[2][1]]
        read_info = {}
        read_info['shape'] = images.shape
        read_info['extract_shape'] = extract_images.shape
        read_info['extract'] = extract

        images = resize(extract_images, im_size, mode='constant')

    f_h5.close()
    return images, read_info
Project: hdrnet_legacy | Author: mgharbi
def normalize(im):
  mini = np.amin(im)
  maxi = np.amax(im)
  rng = maxi-mini
  im -= mini
  if rng > 0:
    im /= rng
  return im


# ----- Type transformations --------------------------------------------------
Project: hdrnet_legacy | Author: mgharbi
def test_interpolate(self):
    for dev in ['/gpu:0']:
      batch_size = 3
      h = 3
      w = 4
      d = 3
      grid_shape = [batch_size, h, w, d, 1]
      grid_data = np.zeros(grid_shape).astype(np.float32)
      grid_data[:, :, :, 1:] = 1.0
      grid_data[:, :, :, 2:] = 2.0

      guide_shape = [batch_size, 5, 9]
      target_shape = [batch_size, 5, 9, 1]

      for val in range(d):
        target_data = val*np.ones(target_shape)
        target_data = target_data.astype(np.float32)

        guide_data = ((val+0.5)/(1.0*d))*np.ones(guide_shape).astype(np.float32)
        output_data = self.run_bilateral_slice(dev, grid_data, guide_data)
        diff = np.amax(np.abs(target_data-output_data))


        self.assertEqual(target_shape, list(output_data.shape))

        self.assertLess(diff, 5e-4)
Project: lung-cancer-detector | Author: YichenGong
def to_rgb(img):
    img = img.reshape(img.shape[0], img.shape[1])
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    blue = np.clip(4*(0.75-img), 0, 1)
    red  = np.clip(4*(img-0.25), 0, 1)
    green = np.clip(4*np.fabs(img-0.5)-1., 0, 1)
    rgb = np.stack((red, green, blue), axis=2)
    return rgb
Project: lung-cancer-detector | Author: YichenGong
def _process_data(self, data):
        # normalization
        data = np.clip(np.fabs(data), self.a_min, self.a_max)
        data -= np.amin(data)
        data /= np.amax(data)
        return data
Project: lung-cancer-detector | Author: YichenGong
def plot_prediction(x_test, y_test, prediction, save=False):
    import matplotlib
    import matplotlib.pyplot as plt

    test_size = x_test.shape[0]
    fig, ax = plt.subplots(test_size, 3, figsize=(12,12), sharey=True, sharex=True)

    x_test = crop_to_shape(x_test, prediction.shape)
    y_test = crop_to_shape(y_test, prediction.shape)

    ax = np.atleast_2d(ax)
    for i in range(test_size):
        cax = ax[i, 0].imshow(x_test[i])
        plt.colorbar(cax, ax=ax[i,0])
        cax = ax[i, 1].imshow(y_test[i, ..., 1])
        plt.colorbar(cax, ax=ax[i,1])
        pred = prediction[i, ..., 1]
        pred -= np.amin(pred)
        pred /= np.amax(pred)
        cax = ax[i, 2].imshow(pred)
        plt.colorbar(cax, ax=ax[i,2])
        if i==0:
            ax[i, 0].set_title("x")
            ax[i, 1].set_title("y")
            ax[i, 2].set_title("pred")
    fig.tight_layout()

    if save:
        fig.savefig(save)
    else:
        fig.show()
        plt.show()