Python numpy module: vstack() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.vstack().
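Before the project excerpts, a quick primer: numpy.vstack() stacks a sequence of arrays along the first axis (row-wise), promoting 1-D inputs to single rows. A minimal, self-contained sketch:

import numpy as np

a = np.array([1, 2, 3])          # shape (3,): promoted to a 1x3 row
b = np.array([[4, 5, 6],
              [7, 8, 9]])        # shape (2, 3)

stacked = np.vstack([a, b])      # shape (3, 3); column counts must match
print(stacked)
# [[1 2 3]
#  [4 5 6]
#  [7 8 9]]

For arrays with two or more dimensions, np.vstack(seq) is equivalent to np.concatenate(seq, axis=0).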

Project: pybot    Author: spillai    | project source | file source
def visualize(self, vis, colored=True): 

        try: 
            tids = set(self.ids)
        except Exception: 
            return vis

        for hid, hbox in zip(self.ids, self.bboxes): 
            cv2.rectangle(vis, (hbox[0], hbox[1]), (hbox[2], hbox[3]), (0,255,0), 1)

        vis = super(BoundingBoxKLT, self).viz(vis, colored=colored)

        # for tid, pts in self.tm_.tracks.iteritems(): 
        #     if tid not in tids: continue
        #     cv2.polylines(vis, [np.vstack(pts.items).astype(np.int32)[-4:]], False, 
        #                   (0,255,0), thickness=1)
        #     tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
        #     cv2.rectangle(vis, (tl[0], tl[1]), (br[0], br[1]), (0,255,0), -1)

        # OpenCVKLT.draw_tracks(self, vis, colored=colored, max_track_length=10)
        return vis
Project: pybot    Author: spillai    | project source | file source
def draw_bboxes(vis, bboxes, texts=None, ellipse=False, colored=True):
    if not len(bboxes): 
        return vis

    if not colored: 
        cols = np.tile([240,240,240], [len(bboxes), 1])
    else: 
        N = 20
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[idx % N] for idx, _ in enumerate(bboxes)])            

    texts = [None] * len(bboxes) if texts is None else texts
    for col, b, t in zip(cols, bboxes, texts): 
        if ellipse: 
            cv2.ellipse(vis, ((b[0]+b[2])//2, (b[1]+b[3])//2), ((b[2]-b[0])//2, (b[3]-b[1])//2), 0, 0, 360, 
                        color=tuple(col), thickness=1)
        else: 
            cv2.rectangle(vis, (b[0], b[1]), (b[2], b[3]), tuple(col), 2)
        if t: 
            annotate_bbox(vis, b, title=t)
    return vis
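The color assignment above vstacks one color-wheel row per box. The same pattern in isolation, with a random integer matrix standing in for the project's colormap() helper:

import numpy as np

N = 20
cwheel = np.random.RandomState(0).randint(0, 256, size=(N, 3))  # stand-in colormap

bboxes = [(10, 10, 50, 50), (20, 30, 80, 90), (5, 5, 15, 25)]
cols = np.vstack([cwheel[idx % N] for idx, _ in enumerate(bboxes)])
print(cols.shape)   # (3, 3): one color row per bounding box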
Project: pybot    Author: spillai    | project source | file source
def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
        """
        color_type: {age, unique}
        """

        N = 20
        # inds = self.confident_tracks(min_length=min_track_length)
        # if not len(inds): 
        #     return

        # ids, pts = self.latest_ids[inds], self.latest_pts[inds]
        # lengths = self.tm_.lengths[inds]

        ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

        if color_type == 'unique': 
            cwheel = colormap(np.linspace(0, 1, N))
            cols = np.vstack([cwheel[tid % N] for idx, tid in enumerate(ids)])
        elif color_type == 'age': 
            cols = colormap(lengths)
        else: 
            raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

        if not colored: 
            cols = np.tile([0,240,0], [len(self.tm_.tracks), 1])

        for col, pts in zip(cols.astype(np.int64), self.tm_.tracks.values()): 
            cv2.polylines(out, [np.vstack(pts.items).astype(np.int32)[-max_track_length:]], False, 
                          tuple(col), thickness=1)
            tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
            cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
Project: cnn-graph-classification    Author: giannisnik    | project source | file source
def compute_nystrom(ds_name, use_node_labels, embedding_dim, community_detection_method, kernels):
    if ds_name=="SYNTHETIC":
        graphs, labels = generate_synthetic()
    else:
        graphs, labels = load_data(ds_name, use_node_labels)
    communities, subgraphs = compute_communities(graphs, use_node_labels, community_detection_method)

    print("Number of communities: ", len(communities))
    lens = []
    for community in communities:
        lens.append(community.number_of_nodes())

    print("Average size: %.2f" % np.mean(lens))
    Q=[]
    for idx, k in enumerate(kernels):
        model = Nystrom(k, n_components=embedding_dim)
        model.fit(communities)
        Q_t = model.transform(communities)
        Q_t = np.vstack([np.zeros(embedding_dim), Q_t])
        Q.append(Q_t)

    return Q, subgraphs, labels, Q_t.shape
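The np.vstack([np.zeros(embedding_dim), Q_t]) call prepends an all-zero row to the embedding matrix, a common trick when row index 0 is reserved (e.g. for padding or unknown nodes). In isolation:

import numpy as np

embedding_dim = 4
Q_t = np.arange(12, dtype=float).reshape(3, embedding_dim)   # 3 embeddings
Q_t = np.vstack([np.zeros(embedding_dim), Q_t])              # now shape (4, 4)
print(Q_t[0])   # row 0 is all zeros, usable as a padding/null embedding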
Project: kaggle_dsb2017    Author: astoc    | project source | file source
def grid_data(source, grid=32, crop=16, expand=12):
    gridsize = grid + 2 * expand
    stacksize = source.shape[0]
    height = source.shape[3]  # should be 224 for our data
    width = source.shape[4]

    gridheight = (height - 2 * crop) // grid  # should be 6 for our data
    gridwidth = (width - 2 * crop) // grid
    cells = []
    for j in range(gridheight):
        for i in range(gridwidth):
            cell = source[:,:,:, crop+j*grid-expand:crop+(j+1)*grid+expand, crop+i*grid-expand:crop+(i+1)*grid+expand]
            cells.append(cell)

    cells = np.vstack(cells)

    return cells, gridwidth, gridheight
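np.vstack(cells) here concatenates the 5-D crops along their first (stack) axis. A shape-only sketch with made-up dimensions:

import numpy as np

cells = [np.zeros((2, 1, 3, 56, 56)) for _ in range(36)]   # 36 grid crops
stacked = np.vstack(cells)
print(stacked.shape)   # (72, 1, 3, 56, 56): stacked along axis 0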
Project: zipline-chinese    Author: zhanghan1990    | project source | file source
def calculate_beta(self):
        """

        .. math::

            \\beta_a = \\frac{\\mathrm{Cov}(r_a,r_p)}{\\mathrm{Var}(r_p)}

        http://en.wikipedia.org/wiki/Beta_(finance)
        """
        # it doesn't make much sense to calculate beta for less than two
        # values, so return none.
        if len(self.algorithm_returns) < 2:
            return 0.0

        returns_matrix = np.vstack([self.algorithm_returns,
                                    self.benchmark_returns])
        C = np.cov(returns_matrix, ddof=1)
        algorithm_covariance = C[0][1]
        benchmark_variance = C[1][1]
        beta = algorithm_covariance / benchmark_variance

        return beta
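The key step is stacking the two return series into a 2xT matrix so that a single np.cov call yields the full 2x2 covariance matrix. A self-contained check with synthetic returns:

import numpy as np

rng = np.random.RandomState(42)
benchmark_returns = rng.normal(0, 0.01, 250)
algorithm_returns = 1.5 * benchmark_returns + rng.normal(0, 0.005, 250)

returns_matrix = np.vstack([algorithm_returns, benchmark_returns])
C = np.cov(returns_matrix, ddof=1)   # 2x2 covariance matrix
beta = C[0][1] / C[1][1]             # Cov(r_a, r_p) / Var(r_p)
print(beta)                          # ~1.5 by construction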
Project: Lattice-Based-Signatures    Author: krishnacharya    | project source | file source
def KeyGen(**kwargs):
    '''
    Appendix B of BLISS paper
    m_bar = m + n

    o/p:    
    A: Public Key n x m' numpy array
    S: Secret Key m'x n numpy array
    '''
    q, n, m, alpha = kwargs['q'], kwargs['n'], kwargs['m'], kwargs['alpha']
    Aq_bar = util.crypt_secure_matrix(-(q-1)//2, (q-1)//2, n, m)
    S_bar = util.crypt_secure_matrix(-(2)**alpha, (2)**alpha, m, n) # alpha is small enough, we need not reduce (modq)
    S = np.vstack((S_bar, np.eye(n, dtype = int))) # dimension is m_bar x n, Elements are in Z mod(2q)
    A = np.hstack((2*Aq_bar, q * np.eye(n, dtype = int) - 2*np.matmul(Aq_bar,S_bar))) # dimension is n x m_bar , Elements are in Z mod(2q)
    #return util.matrix_to_Zq(A, 2*q), S, Aq_bar, S_bar
    return util.matrix_to_Zq(A, 2*q), S
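vstack and hstack act as block-matrix constructors here: S stacks S_bar on top of an identity block, and A glues two blocks side by side. The shape bookkeeping with toy dimensions (not the cryptographic parameters from the paper):

import numpy as np

n, m = 3, 5
S_bar = np.ones((m, n), dtype=int)
S = np.vstack((S_bar, np.eye(n, dtype=int)))   # (m + n) x n block matrix
print(S.shape)                                 # (8, 3)

A = np.hstack((np.zeros((n, m), dtype=int), np.eye(n, dtype=int)))
print(A.shape)                                 # (3, 8): n x (m + n)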
Project: pycpd    Author: siavashk    | project source | file source
def main():
    fish = loadmat('./data/fish.mat')

    X1 = np.zeros((fish['X'].shape[0], fish['X'].shape[1] + 1))
    X1[:,:-1] = fish['X']
    X2 = np.ones((fish['X'].shape[0], fish['X'].shape[1] + 1))
    X2[:,:-1] = fish['X']
    X = np.vstack((X1, X2))

    Y1 = np.zeros((fish['Y'].shape[0], fish['Y'].shape[1] + 1))
    Y1[:,:-1] = fish['Y']
    Y2 = np.ones((fish['Y'].shape[0], fish['Y'].shape[1] + 1))
    Y2[:,:-1] = fish['Y']
    Y = np.vstack((Y1, Y2))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    callback = partial(visualize, ax=ax)

    reg = affine_registration(X, Y)
    reg.register(callback)
    plt.show()
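The script lifts the 2-D fish point set into 3-D twice, once at z = 0 and once at z = 1, and vstacks the two copies into a single cloud. The same construction with a toy point set standing in for fish['X']:

import numpy as np

pts = np.random.RandomState(0).rand(10, 2)   # stand-in for fish['X']

X1 = np.zeros((pts.shape[0], pts.shape[1] + 1))
X1[:, :-1] = pts                             # plane z = 0
X2 = np.ones((pts.shape[0], pts.shape[1] + 1))
X2[:, :-1] = pts                             # plane z = 1
X = np.vstack((X1, X2))                      # (20, 3) combined cloud
print(X.shape)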
Project: pointnet    Author: charlesq34    | project source | file source
def volume_to_point_cloud(vol):
    """ vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
        return Nx3 numpy array.
    """
    vsize = vol.shape[0]
    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
    points = []
    for a in range(vsize):
        for b in range(vsize):
            for c in range(vsize):
                if vol[a,b,c] == 1:
                    points.append(np.array([a,b,c]))
    if len(points) == 0:
        return np.zeros((0,3))
    points = np.vstack(points)
    return points

# ----------------------------------------
# Point cloud IO
# ----------------------------------------
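Worth noting: np.argwhere gives a vectorized equivalent of the voxel loop above, returning the Nx3 indices of matching voxels directly, the same rows the vstack produces. A sketch:

import numpy as np

vol = np.zeros((4, 4, 4))
vol[0, 1, 2] = 1
vol[3, 3, 3] = 1

points = np.argwhere(vol == 1)   # (N, 3), same result as the vstack loop
print(points)
# [[0 1 2]
#  [3 3 3]]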
Project: GraphTime    Author: GlooperLabs    | project source | file source
def standard_case(self):
        """Create standard testcase from Thetas defined in this Testcase. The following
        metrics can be calculated by hand and should match the computations:

        precisions: [1, 1, 0, 2/3, 1]
        recalls: [1, 1, 0, 1, 0.5]
        f1s: [1, 1, 0, 0.8, 2/3]
        tps: 1 + 1 + 0 + 2 + 1 = 5
        fps: 0 + 0 + 1 + 1 + 0 = 2
        fns: 0 + 0 + 2 + 0 + 1 = 3
        tns: 2 + 2 + 0 + 0 + 1 = 5
        """
        Theta_true = np.vstack([
            np.repeat(self.Theta_true1[nx, :, :], 2, axis=0),
            np.repeat(self.Theta_true2[nx, :, :], 3, axis=0)
        ])
        Theta_pred = np.vstack([
            np.repeat(self.Theta_pred1[nx, :, :], 3, axis=0),
            self.Theta_pred2[nx, :, :],
            self.Theta_pred3[nx, :, :]
        ])
        return Theta_true, Theta_pred
Project: HandDetection    Author: YunqiuXu    | project source | file source
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
  """ A wrapper function to generate anchors given different scales
    Also return the number of anchors in variable 'length'
  """
  anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
  A = anchors.shape[0]
  shift_x = np.arange(0, width) * feat_stride
  shift_y = np.arange(0, height) * feat_stride
  shift_x, shift_y = np.meshgrid(shift_x, shift_y)
  shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
  K = shifts.shape[0]
  # width changes faster, so here it is H, W, C
  anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
  anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
  length = np.int32(anchors.shape[0])

  return anchors, length
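The vstack of four raveled meshgrids yields one (K, 4) shift row per spatial position, which is then broadcast against the (A, 4) base anchors. The shift construction in isolation, with a tiny feature map:

import numpy as np

height, width, feat_stride = 2, 3, 16
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                    shift_x.ravel(), shift_y.ravel())).transpose()
print(shifts.shape)   # (6, 4): one (dx, dy, dx, dy) row per grid cell
print(shifts[:2])
# [[ 0  0  0  0]
#  [16  0 16  0]]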
Project: pybot    Author: spillai    | project source | file source
def draw_laser_frustum(pose, zmin=0.0, zmax=10, fov=np.deg2rad(60)): 

    N = 30
    curve = np.vstack([(
        RigidTransform.from_rpyxyz(0, 0, rad, 0, 0, 0) * np.array([[zmax, 0, 0]])) 
             for rad in np.linspace(-fov/2, fov/2, N)])

    curve_w = pose * curve

    faces, edges = [], []
    for cpt1, cpt2 in zip(curve_w[:-1], curve_w[1:]): 
        faces.extend([pose.translation, cpt1, cpt2])
        edges.extend([cpt1, cpt2])

    # Connect the last pt in the curve w/ the current pose, 
    # then connect the first pt in the curve w/ the curr. pose
    edges.extend([edges[-1], pose.translation])
    edges.extend([edges[0], pose.translation])

    faces = np.vstack(faces)
    edges = np.vstack(edges)
    return (faces, edges)
Project: pybot    Author: spillai    | project source | file source
def __init__(self, target, instance, files): 
            self.target = target 
            self.instance = instance
            mask_files = natural_sort(filter(lambda fn: '_maskcrop.png' in fn, files))
            depth_files = natural_sort(filter(lambda  fn: '_depthcrop.png' in fn, files))
            rgb_files = natural_sort(list(set(files) - set(mask_files) - set(depth_files)))
            loc_files = natural_sort(map(lambda fn: fn.replace('_crop.png', '_loc.txt'), rgb_files))

            # Ensure all have equal number of files (Hack! doesn't ensure filename consistency)
            nfiles = np.min([len(loc_files), len(mask_files), len(depth_files), len(rgb_files)])
            mask_files, depth_files, rgb_files, loc_files = mask_files[:nfiles], depth_files[:nfiles], \
                                                            rgb_files[:nfiles], loc_files[:nfiles]

            # print target, instance, len(loc_files), len(mask_files), len(depth_files), len(rgb_files)
            assert(len(mask_files) == len(depth_files) == len(rgb_files) == len(loc_files))

            # Read images
            self.rgb = ImageDatasetReader.from_filenames(rgb_files)
            self.depth = ImageDatasetReader.from_filenames(depth_files)
            self.mask = ImageDatasetReader.from_filenames(mask_files)

            # Read top-left locations of bounding box
            self.locations = np.vstack([np.loadtxt(loc, delimiter=',', dtype=np.int32) 
                                        for loc in loc_files])
Project: pybot    Author: spillai    | project source | file source
def __init__(self, frame=None): 
        """
        Load annotation from json
        """
        self.annotations_ = []

        # Retrieve polygons
        try: 
            polygons = frame['polygon']
        except Exception: 
            return

        # For each polygon
        for poly in polygons: 

            # Get coordinates
            xy = np.vstack([np.float32(poly['x']), 
                            np.float32(poly['y'])]).T

            # Object ID (from local annotation file)
            object_id = poly['object']
            self.add(object_id, xy)
Project: pybot    Author: spillai    | project source | file source
def im_detect_and_describe(img, mask=None, detector='dense', descriptor='SIFT', colorspace='gray',
                           step=4, levels=7, scale=np.sqrt(2)): 
    """ 
    Describe image using dense sampling / specific detector-descriptor combination. 
    """
    detector = get_detector(detector=detector, step=step, levels=levels, scale=scale)
    extractor = cv2.DescriptorExtractor_create(descriptor)

    try:     
        kpts = detector.detect(img, mask=mask)
        kpts, desc = extractor.compute(img, kpts)

        if descriptor == 'SIFT': 
            kpts, desc = root_sift(kpts, desc)

        pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
        return pts, desc

    except Exception as e: 
        print('im_detect_and_describe', e)
        return None, None
Project: pybot    Author: spillai    | project source | file source
def im_describe(*args, **kwargs): 
    """ 
    Describe image using dense sampling / specific detector-descriptor combination. 
    Sugar for description-only call. 
    """
    kpts, desc = im_detect_and_describe(*args, **kwargs)
    return desc

# def color_codes(img, kpts): 
#     # Extract color information (Lab)
#     pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
#     imgc = median_blur(img, size=5) 
#     cdesc = img[pts[:,1], pts[:,0]]
#     return kpts, np.hstack([desc, cdesc])


# =====================================================================
# General-purpose object recognition interfaces, and functions
# ---------------------------------------------------------------------
Project: pybot    Author: spillai    | project source | file source
def points_and_normals(self): 
        """
        Returns the point/normals parametrization for planes, 
        including clipped zmin and zmax frustums

        Note: points need to be in CCW order
        """

        nv1, fv1 = self._front_back_vertices
        nv2 = np.roll(nv1, -1, axis=0)
        fv2 = np.roll(fv1, -1, axis=0)

        vx = np.vstack([fv1-nv1, nv2[0]-nv1[0], fv1[2]-fv1[1]])
        vy = np.vstack([fv2-fv1, nv2[1]-nv2[0], fv1[1]-fv1[0]])
        pts = np.vstack([fv1, nv1[0], fv1[1]])

        # vx += 1e-12
        # vy += 1e-12

        vx /= np.linalg.norm(vx, axis=1).reshape(-1,1)
        vy /= np.linalg.norm(vy, axis=1).reshape(-1,1)

        normals = np.cross(vx, vy)
        normals /= np.linalg.norm(normals, axis=1).reshape(-1,1)
        return pts, normals
Project: pybot    Author: spillai    | project source | file source
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step//2:h:step, step//2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
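The line construction vstacks start and end coordinates into a (4, M) array, transposes, and reshapes to the (M, 2, 2) segment layout cv2.polylines expects. The array manipulation alone, with hand-picked sample values:

import numpy as np

x = np.array([8, 24])        # sample-point columns
y = np.array([8, 8])         # sample-point rows
fx = np.array([1.0, -2.0])   # flow at those points
fy = np.array([0.0, 3.0])

lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
print(lines)
# [[[ 8  8]
#   [ 9  8]]
#
#  [[24  8]
#   [22 11]]]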
Project: chash    Author: luhsra    | project source | file source
def plotTimeMultiHistogram(parseTimes, hashTimes, compileTimes, filename): # times in ms
    bins = np.linspace(0, 5000, 50)
    data = np.vstack([parseTimes, hashTimes, compileTimes]).T
    fig, ax = plt.subplots()
    plt.hist(data, bins, alpha=0.7, label=['parsing', 'hashing', 'compiling'], color=[parseColor, hashColor, compileColor])
    plt.legend(loc='upper right')
    plt.xlabel('time [ms]')
    plt.ylabel('#files')
    fig.savefig(filename)

    fig, ax = plt.subplots()
    boxplot_data = [[i/1000.0 for i in parseTimes], [i/1000.0 for i in hashTimes], [i/1000.0 for i in compileTimes]] # times to s
    plt.boxplot(boxplot_data, 0, 'rs', 0, [5, 95])
    plt.xlabel('time [s]')
    plt.yticks([1, 2, 3], ['parsing', 'hashing', 'compiling'])
    #lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # legend on the right
    fig.savefig(filename[:-4] + '_boxplots' + GRAPH_EXTENSION)
Project: CausalGAN    Author: mkocaoglu    | project source | file source
def take_product(do_dict):
    '''
    this function takes a dictionary like:
        {key1: 1, key2: [a,b], key3: [c,d]}
    and returns the dictionary:
        {key1: [1,1,1,1], key2: [a,a,b,b], key3: [c,d,c,d]}
    i.e. the Cartesian product of the values
    '''
    values=[]
    for v in do_dict.values():
        if hasattr(v,'__iter__'):
            values.append(v)
        else:
            values.append([v])#allows scalar to be passed

    prod_values = np.vstack(list(product(*values)))
    return {k:np.array(v) for k,v in zip(do_dict.keys(),zip(*prod_values))}
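itertools.product pairs naturally with vstack: each combination becomes one row, and columns stay aligned with the original keys. (Newer numpy releases require a sequence rather than a bare iterator, hence the list() around product.) A runnable check:

import numpy as np
from itertools import product

values = [[1], ['a', 'b'], ['c', 'd']]
prod_values = np.vstack(list(product(*values)))   # one row per combination
print(prod_values)
# [['1' 'a' 'c']
#  ['1' 'a' 'd']
#  ['1' 'b' 'c']
#  ['1' 'b' 'd']]

Note the mixed int/str input is promoted to a string array; the original function zips the columns back out per key.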
Project: CausalGAN    Author: mkocaoglu    | project source | file source
def get_interv_table(model,intrv=True):

    n_batches=25
    table_outputs=[]
    d_vals=np.linspace(TINY,0.6,n_batches)
    for name in model.cc.node_names:
        outputs=[]
        for d_val in d_vals:
            do_dict={model.cc.node_dict[name].label_logit : d_val*np.ones((model.batch_size,1))}
            outputs.append(model.sess.run(model.fake_labels,do_dict))

        out=np.vstack(outputs)
        table_outputs.append(out)

    table=np.stack(table_outputs,axis=2)

    np.mean(np.round(table),axis=0)

    return table

#dT=pd.DataFrame(index=p_names, data=T, columns=do_names)
#T=np.mean(np.round(table),axis=0)
#table=get_interv_table(model)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def __loadChnTimeWave(self,f,selectChan):
        times = list()
        waveforms = list()
        spk_startswith = "spike_{0}".format(selectChan)
        for chn_unit in f["spikes"].keys():
            if chn_unit.startswith(spk_startswith):
                time = f["spikes"][chn_unit]["times"].value
                waveform = f["spikes"][chn_unit]["waveforms"].value
                times.append(time)
                waveforms.append(waveform)
        if times:
            times = np.hstack(times)
            waveforms = np.vstack(waveforms)
            sort_index = np.argsort(times)
            waveforms = waveforms[sort_index]
            times = times[sort_index]
            return times,waveforms
        else:
            return None,None
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def __load_waveforms(self,selectChan,file_name):
        spk_startswith = "spike_{0}".format(selectChan)
        with hp.File(file_name,"r") as f:
            times = list()
            waveforms = list()
            for chn_unit in f["spikes"].keys():
                if chn_unit.startswith(spk_startswith):
                    tep_time = f["spikes"][chn_unit]["times"].value
                    waveform = f["spikes"][chn_unit]["waveforms"].value
                    times.append(tep_time)
                    waveforms.append(waveform)
            if times:
                times = np.hstack(times)
                waveforms = np.vstack(waveforms)
                sort_index = np.argsort(times)
                waveforms = waveforms[sort_index]
                return waveforms
            else:
                return None
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _extract_signals(self, data, metadata, lazy):

        signal = None
        if lazy and data.size > 0:
            signal = AnalogSignal([],
                                  units=self._determine_units(metadata),
                                  sampling_period=metadata['dt']*pq.ms)
            signal.lazy_shape = None
        else:
            arr = numpy.vstack([self._extract_array(data, channel_index)
                                for channel_index in range(metadata['first_index'], metadata['last_index'] + 1)])
            if len(arr) > 0:
                signal = AnalogSignal(arr.T,
                                      units=self._determine_units(metadata),
                                      sampling_period=metadata['dt']*pq.ms)
        if signal is not None:
            signal.annotate(label=metadata["label"],
                            variable=metadata["variable"])
        return signal
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def __load_waveforms(self,selectChan,file_name):
        spk_startswith = "spike_{0}".format(selectChan)
        with hp.File(file_name,"r") as f:
            times = list()
            waveforms = list()
            for chn_unit in f["spikes"].keys():
                if chn_unit.startswith(spk_startswith):
                    tep_time = f["spikes"][chn_unit]["times"].value
                    waveform = f["spikes"][chn_unit]["waveforms"].value
                    times.append(tep_time)
                    waveforms.append(waveform)
            if times:
                times = np.hstack(times)
                waveforms = np.vstack(waveforms)
                sort_index = np.argsort(times)
                waveforms = waveforms[sort_index]
                return waveforms
            else:
                return None
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _extract_signals(self, data, metadata, lazy):

        signal = None
        if lazy and data.size > 0:
            signal = AnalogSignal([],
                                  units=self._determine_units(metadata),
                                  sampling_period=metadata['dt']*pq.ms)
            signal.lazy_shape = None
        else:
            arr = numpy.vstack([self._extract_array(data, channel_index)
                                for channel_index in range(metadata['first_index'], metadata['last_index'] + 1)])
            if len(arr) > 0:
                signal = AnalogSignal(arr.T,
                                      units=self._determine_units(metadata),
                                      sampling_period=metadata['dt']*pq.ms)
        if signal is not None:
            signal.annotate(label=metadata["label"],
                            variable=metadata["variable"])
        return signal
Project: ensemble_amazon    Author: kaz-Anova    | project source | file source
def predict_proba(self, X): 
    try:
        rows = X.shape[0]
    except AttributeError:
        rows = len(X)
    X1 = self.build_matrix(X)
    if self.k_models is not None and len(self.k_models) < 2:
        predictions = self.bst.predict(X1)
    else:
        dtest = xgb.DMatrix(X)
        predictions = None
        for gbdt in self.k_models:
            predsnew = gbdt.predict(dtest, ntree_limit=(gbdt.best_iteration+1)*self.num_parallel_tree)
            if predictions is None:
                predictions = predsnew
            else:
                for g in range(0, predsnew.shape[0]):
                    predictions[g] += predsnew[g]
        for g in range(0, len(predictions)):
            predictions[g] /= float(len(self.k_models))
        predictions = np.array(predictions)
    if self.objective == 'multi:softprob': return predictions.reshape( rows, self.num_class)
    return np.vstack([1 - predictions, predictions]).T
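The final vstack is a standard idiom for binary classifiers that only produce the positive-class probability: stack the complement and the prediction, then transpose into the (n_samples, 2) layout scikit-learn-style APIs expect. Standalone:

import numpy as np

predictions = np.array([0.1, 0.8, 0.5])   # P(class 1) per sample
proba = np.vstack([1 - predictions, predictions]).T
print(proba)
# [[0.9 0.1]
#  [0.2 0.8]
#  [0.5 0.5]]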
Project: Sound-classification-on-Raspberry-Pi-with-Tensorflow    Author: GianlucaPaolocci    | project source | file source
def parse_audio_files(parent_dir,sub_dirs,file_ext='*.wav'):
    ignored = 0
    features, labels, name = np.empty((0,161)), np.empty(0), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        print(sub_dir)
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            try:
                mfccs, chroma, mel, contrast, tonnetz = extract_features(fn)
                ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
                features = np.vstack([features,ext_features])
                l = [fn.split('-')[1]] * (mfccs.shape[0])
                labels = np.append(labels, l)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                ignored += 1
    print "Ignored files: ", ignored
    return np.array(features), np.array(labels, dtype = np.int)
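Starting from np.empty((0, 161)) and vstacking each new row keeps the column count fixed from the first append, but it recopies the whole matrix on every iteration. A minimal form of the pattern, plus the cheaper list-then-stack-once variant:

import numpy as np

features = np.empty((0, 4))
for _ in range(3):
    features = np.vstack([features, np.random.rand(4)])   # O(n^2) copies
print(features.shape)   # (3, 4)

rows = [np.random.rand(4) for _ in range(3)]
features = np.vstack(rows)                                # single copy
print(features.shape)   # (3, 4)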
Project: dpl    Author: ppengtang    | project source | file source
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    im_shapes = np.zeros((0, 2), dtype=np.float32)
    for i in range(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale, im_shape = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size)
        im_scales.append(im_scale)
        processed_ims.append(im)
        im_shapes = np.vstack((im_shapes, im_shape))

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales, im_shapes
Project: PyGPS    Author: gregstarr    | project source | file source
def _block2df(block,obstypes,svnames,svnum):
    """
    input: block of text corresponding to one time increment INTERVAL of RINEX file
    output: 2-D array of float64 data from block. Future: consider whether best to use Numpy, Pandas, or Xray.
    """
    nobs = len(obstypes)
    stride=3

    strio = BytesIO(block.encode())
    barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1), order='C')

    data = barr[:,0:nobs*stride:stride]
    lli  = barr[:,1:nobs*stride:stride]
    ssi  = barr[:,2:nobs*stride:stride]

    data = np.vstack(([data.T],[lli.T],[ssi.T])).T

    return data
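Note that wrapping each 2-D array in a one-element list makes it 3-D with a leading axis of 1, so vstack produces a (3, nobs, svnum) array and the final .T moves the data/LLI/SSI triplet to the last axis. A shape-only sketch:

import numpy as np

svnum, nobs = 4, 2
data = np.zeros((svnum, nobs))
lli = np.ones((svnum, nobs))
ssi = np.full((svnum, nobs), 2.0)

out = np.vstack(([data.T], [lli.T], [ssi.T])).T
print(out.shape)   # (4, 2, 3): sv x observable x (data, lli, ssi)
print(out[0, 0])   # [0. 1. 2.]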
Project: PyGPS    Author: gregstarr    | project source | file source
def _block2df(block,obstypes,svnames,svnum):
    """
    input: block of text corresponding to one time increment INTERVAL of RINEX file
    output: 2-D array of float64 data from block. Future: consider whether best to use Numpy, Pandas, or Xray.
    """
    nobs = len(obstypes)
    stride=3

    strio = BytesIO(block.encode())
    barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1), order='C')

    data = barr[:,0:nobs*stride:stride]
    lli  = barr[:,1:nobs*stride:stride]
    ssi  = barr[:,2:nobs*stride:stride]

    data = np.vstack(([data.T],[lli.T],[ssi.T])).T

    return data
Project: PyGPS    Author: gregstarr    | project source | file source
def _block2df(block,obstypes,svnames,svnum):
    """
    input: block of text corresponding to one time increment INTERVAL of RINEX file
    output: 2-D array of float64 data from block.
    """
    nobs = len(obstypes)
    stride=3

    strio = BytesIO(block.encode())
    barr = np.genfromtxt(strio, delimiter=(14,1,1)*5).reshape((svnum,-1), order='C')

    data = barr[:,0:nobs*stride:stride]
    lli  = barr[:,1:nobs*stride:stride]
    ssi  = barr[:,2:nobs*stride:stride]

    data = np.vstack(([data],[lli],[ssi])).T #4D numpy array

    return data
Project: risk-slim    Author: ustunb    | project source | file source
def get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max = None):
    "global variables: L0_reg_ind"
    edge_values = np.vstack([Z_min * rho_lb,
                             Z_max * rho_lb,
                             Z_min * rho_ub,
                             Z_max * rho_ub])

    if L0_max is None or L0_max == Z_min.shape[0]:
        s_min = np.sum(np.min(edge_values, axis = 0))
        s_max = np.sum(np.max(edge_values, axis = 0))
    else:
        min_values = np.min(edge_values, axis = 0)
        s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
        s_min_no_reg = np.sum(min_values[~L0_reg_ind])
        s_min = s_min_reg + s_min_no_reg

        max_values = np.max(edge_values, axis = 0)
        s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
        s_max_no_reg = np.sum(max_values[~L0_reg_ind])
        s_max = s_max_reg + s_max_no_reg

    return s_min, s_max


#setup weights
Project: risk-slim    Author: ustunb    | project source | file source
def get_score_bounds(Z_min, Z_max, rho_lb, rho_ub, L0_reg_ind = None, L0_max = None):
    edge_values = np.vstack([Z_min * rho_lb,
                             Z_max * rho_lb,
                             Z_min * rho_ub,
                             Z_max * rho_ub])

    if (L0_max is None) or (L0_reg_ind is None) or (L0_max == Z_min.shape[0]):
        s_min = np.sum(np.min(edge_values, axis=0))
        s_max = np.sum(np.max(edge_values, axis=0))
    else:
        min_values = np.min(edge_values, axis=0)
        s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
        s_min_no_reg = np.sum(min_values[~L0_reg_ind])
        s_min = s_min_reg + s_min_no_reg

        max_values = np.max(edge_values, axis=0)
        s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
        s_max_no_reg = np.sum(max_values[~L0_reg_ind])
        s_max = s_max_reg + s_max_no_reg

    return s_min, s_max
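The vstack collects the four products of {Z_min, Z_max} x {rho_lb, rho_ub} into a (4, d) matrix, so the per-coefficient extreme contributions fall out of one columnwise min/max. A small numeric check:

import numpy as np

Z_min = np.array([-1.0, 0.0])
Z_max = np.array([2.0, 3.0])
rho_lb = np.array([-2.0, 1.0])
rho_ub = np.array([1.0, 4.0])

edge_values = np.vstack([Z_min * rho_lb, Z_max * rho_lb,
                         Z_min * rho_ub, Z_max * rho_ub])
s_min = np.sum(np.min(edge_values, axis=0))   # most pessimistic total score
s_max = np.sum(np.max(edge_values, axis=0))   # most optimistic total score
print(s_min, s_max)   # -4.0 14.0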
Project: AutoSleepScorerDev    Author: skjerns    | project source | file source
def _makeflat(self, start=None, end=None, groups = False):     
        eeg = list()
        for sub in self.data[start:end]:
            if len(sub) % self.chunk_len == 0:
                eeg.append(sub.reshape([-1, self.chunk_len,3]))
            else:
                print('ERROR: Please choose a chunk length that is a factor of {}. Current len = {}'.format(self.samples_per_epoch, len(sub)))
                return [0,0]
        hypno = list()
        group = list()
        hypno_repeat = self.samples_per_epoch // self.chunk_len
        idx = 0
        for sub in self.hypno[start:end]:
            hypno.append(np.repeat(sub, hypno_repeat))
            group.append(np.repeat(idx, len(hypno[-1])))
            idx += 1

        if groups:
            return np.vstack(eeg), np.hstack(hypno), np.hstack(group)
        else:
            return np.vstack(eeg), np.hstack(hypno)
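The return pattern, vstacking per-subject 3-D chunk arrays while hstacking the matching 1-D label vectors, keeps samples and labels aligned along the first axis. A shape sketch:

import numpy as np

eeg = [np.zeros((5, 100, 3)), np.zeros((7, 100, 3))]   # chunks per subject
hypno = [np.zeros(5), np.ones(7)]                      # labels per subject

X = np.vstack(eeg)     # (12, 100, 3): vstack concatenates 3-D arrays too
y = np.hstack(hypno)   # (12,): one label per chunk
print(X.shape, y.shape)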
Project: AutoSleepScorerDev    Author: skjerns    | project source | file source
def natural_key(string_):
    """See http://www.codinghorror.com/blog/archives/001018.html"""
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]   

#%%

#l=a['feat_eeg']
#val_acc = [y[0] for y in [x for x in l]]
#val_f1 = [y[1] for y in [x for x in l]]
#test_acc = [y[2] for y in [x for x in l]]
#test_f1 = [y[3] for y in [x for x in l]]
#
#val = np.vstack([val_acc, val_f1]).T
#test = np.vstack([test_acc, test_f1]).T

#a   = pickle.load(open('./results_dataset_feat_edfx','rb'))
Project: bob.bio.base    Author: bioidiap    | project source | file source
def enroll(self, enroll_features):
    """enroll(enroll_features) -> model

    Enrolls the model by storing all given input vectors.

    **Parameters:**

    enroll_features : [1D :py:class:`numpy.ndarray`]
      The list of projected features to enroll the model from.

    **Returns:**

    model : 2D :py:class:`numpy.ndarray`
      The enrolled model.
    """
    assert len(enroll_features)
    [self._check_feature(feature, True) for feature in enroll_features]
    # just store all the features
    return numpy.vstack(enroll_features)
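Enrollment here is just vstacking the 1-D feature vectors into a single 2-D model, one row per sample, relying on vstack's promotion of 1-D inputs to rows. Standalone:

import numpy as np

enroll_features = [np.array([1.0, 2.0, 3.0]),
                   np.array([4.0, 5.0, 6.0])]
model = np.vstack(enroll_features)   # (2, 3): one feature vector per row
print(model.shape)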
Project: bob.bio.base    Author: bioidiap    | project source | file source
def enroll(self, enroll_features):
    """enroll(enroll_features) -> model

    Enrolls the model by storing all given input vectors.

    **Parameters:**

    ``enroll_features`` : [:py:class:`numpy.ndarray`]
      The list of projected features to enroll the model from.

    **Returns:**

    ``model`` : 2D :py:class:`numpy.ndarray`
      The enrolled model.
    """
    assert len(enroll_features)
    [self._check_feature(feature) for feature in enroll_features]
    # just store all the features
    return numpy.vstack([f.flatten() for f in enroll_features])
Project: bob.bio.base    Author: bioidiap    | project source | file source
def train_projector(self, training_features, projector_file):
    """Generates the PCA covariance matrix and writes it into the given projector_file.

    **Parameters:**

    training_features : [1D :py:class:`numpy.ndarray`]
      A list of 1D training arrays (vectors) to train the PCA projection matrix with.

    projector_file : str
      A writable file, into which the PCA projection matrix (as a :py:class:`bob.learn.linear.Machine`) and the eigenvalues will be written.
    """
    # Assure that all data are 1D
    [self._check_feature(feature) for feature in training_features]

    # Initializes the data
    data = numpy.vstack(training_features)
    logger.info("  -> Training LinearMachine using PCA")
    t = bob.learn.linear.PCATrainer()
    self.machine, self.variances = t.train(data)
    # For re-shaping, we need to copy...
    self.variances = self.variances.copy()

    # compute variance percentage, if desired
    if isinstance(self.subspace_dim, float):
      cumulated = numpy.cumsum(self.variances) / numpy.sum(self.variances)
      for index in range(len(cumulated)):
        if cumulated[index] > self.subspace_dim:
          self.subspace_dim = index
          break
      self.subspace_dim = index
    logger.info("    ... Keeping %d PCA dimensions", self.subspace_dim)
    # re-shape machine
    self.machine.resize(self.machine.shape[0], self.subspace_dim)
    self.variances.resize(self.subspace_dim)

    f = bob.io.base.HDF5File(projector_file, "w")
    f.set("Eigenvalues", self.variances)
    f.create_group("Machine")
    f.cd("/Machine")
    self.machine.save(f)
Project: bob.bio.base    Author: bioidiap    | project source | file source
def enroll(self, enroll_features):
    """enroll(enroll_features) -> model

    Enrolls the model by storing all given input vectors.

    **Parameters:**

    enroll_features : [1D :py:class:`numpy.ndarray`]
      The list of projected features to enroll the model from.

    **Returns:**

    model : 2D :py:class:`numpy.ndarray`
      The enrolled model.
    """
    assert len(enroll_features)
    [self._check_feature(feature, True) for feature in enroll_features]
    # just store all the features
    return numpy.vstack(enroll_features)
Project: bob.bio.base    Author: bioidiap    | project source | file source
def _scores_d_normalize(t_model_ids, group):
  """Compute normalized D scores for the given T-model ids"""
  # the file selector object
  fs = FileSelector.instance()

  # initialize D and D_same_value matrices
  d_for_all = None
  d_same_value = None
  for t_model_id in t_model_ids:
    tmp = bob.io.base.load(fs.d_file(t_model_id, group))
    tmp2 = bob.io.base.load(fs.d_same_value_file(t_model_id, group))
    if d_for_all is None and d_same_value is None:
      d_for_all = tmp
      d_same_value = tmp2
    else:
      d_for_all = numpy.vstack((d_for_all, tmp))
      d_same_value = numpy.vstack((d_same_value, tmp2))

  # Saves to files
  bob.io.base.save(d_for_all, fs.d_matrix_file(group))
  bob.io.base.save(d_same_value, fs.d_same_value_matrix_file(group))
Project: seqhawkes    Author: mlukasik    | project source | file source
def x_frame2D(X, plot_limits=None, resolution=None):
    """
    Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
    """

    assert X.shape[1] == 2, \
        'x_frame2D is defined for two-dimensional inputs'
    if plot_limits is None:
        (xmin, xmax) = (X.min(0), X.max(0))
        (xmin, xmax) = (xmin - 0.2 * (xmax - xmin), xmax + 0.2 * (xmax
                        - xmin))
    elif len(plot_limits) == 2:
        (xmin, xmax) = plot_limits
    else:
        raise ValueError('Bad limits for plotting')

    resolution = resolution or 50
    (xx, yy) = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:
                        xmax[1]:1j * resolution]
    Xnew = np.vstack((xx.flatten(), yy.flatten())).T
    return (Xnew, xx, yy, xmin, xmax)
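The closing mgrid-plus-vstack combination is the usual way to turn a 2-D evaluation grid into an (resolution**2, 2) matrix of input points, one row per grid node. In isolation:

import numpy as np

resolution = 3
xmin, xmax = np.array([0.0, 0.0]), np.array([1.0, 2.0])
xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution,
                  xmin[1]:xmax[1]:1j * resolution]
Xnew = np.vstack((xx.flatten(), yy.flatten())).T
print(Xnew.shape)   # (9, 2): every grid point as a row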
Project: seqhawkes    Author: mlukasik    | project source | file source
def loadX(fname):
    '''
    Read data records into a data matrix.
    Also return vocabulary.
    '''

    events = []
    words_keys = set()
    for e in load(fname):
        events.append(e)
        words_keys = words_keys | set(e[5].keys())
    words_keys = sorted(list(words_keys))
    for (eidx, e) in enumerate(events):
        events[eidx] = list(e[:5]) + [e[5].get(word_key, 0)
                for word_key in words_keys]
    X = np.vstack(events)
    return (X, words_keys)
Project: deeppavlov    Author: deepmipt    | project source | file source
def get_document_batch(self, doc_id):
        """builds batch of all mention pairs in one document

        Args:
            doc_id: id of document

        Returns:
            feature representation of mentions and labels
        """
        mentions = self.dl.get_all_mentions_from_doc(doc_id)
        if len(mentions) == 0:
            return None, None
        A, B = [], []
        for a in mentions:
            for b in mentions:
                A.append(a)
                B.append(b)
        A_f = [self._mention_to_features(m) for m in A]
        B_f = [self._mention_to_features(m) for m in B]
        AB_f = self._pair_features(A, B)
        A = [self.dl.mention_features[m] for m in A]
        B = [self.dl.mention_features[m] for m in B]
        return np.vstack(A), np.stack(A_f), np.vstack(B), np.stack(B_f), np.stack(AB_f)
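The mix of np.vstack and np.stack in the return is deliberate: vstack concatenates along an existing first axis (promoting 1-D inputs to rows), while np.stack always adds a new leading axis. The difference in one snippet:

import numpy as np

mats = [np.ones((2, 2)), np.zeros((2, 2))]
print(np.vstack(mats).shape)   # (4, 2): concatenated along axis 0
print(np.stack(mats).shape)    # (2, 2, 2): new leading axis added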
Project: joysix    Author: niberger    | project source | file source
def log(p):
    q = p.rot
    t = p.trans
    r = quat.log(q)
    D = quat.dlog(r)
    return np.vstack((r, D * t))
Project: IntroToDeepLearning    Author: robb-brown    | project source | file source
def plotFields(layer,fieldShape=None,channel=None,figOffset=1,cmap=None,padding=0.01):
    # Receptive Fields Summary
    try:
        W = layer.W
    except AttributeError:
        W = layer
    wp = W.eval().transpose();
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape) 
    else:           # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))

    fig = mpl.figure(figOffset); mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
    for i in range(0,np.shape(fields)[0]):
        im = grid[i].imshow(fields[i],cmap=cmap); 

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    # old way
    # fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    # tiled = []
    # for i in range(0,perColumn*perRow,perColumn):
    #   tiled.append(np.hstack(fields2[i:i+perColumn]))
    # 
    # tiled = np.vstack(tiled)
    # mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
    mpl.figure(figOffset+1); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
Project: IntroToDeepLearning    Author: robb-brown    | project source | file source
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
    # Output summary
    try:
        W = layer.output
    except AttributeError:
        W = layer
    wp = W.eval(feed_dict=feed_dict);
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        temp = np.zeros(np.prod(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
        fields = np.reshape(temp,[1]+fieldShape)
    else:           # Convolutional layer already has shape
        wp = np.rollaxis(wp,3,0)
        features, channels, iy,ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
    fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0,perColumn*perRow,perColumn):
        tiled.append(np.hstack(fields2[i:i+perColumn]))

    tiled = np.vstack(tiled)
    if figOffset is not None:
        mpl.figure(figOffset); mpl.clf(); 

    mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
Project: IntroToDeepLearning    Author: robb-brown    | project source | file source
def plotFields(layer,fieldShape=None,channel=None,maxFields=25,figName='ReceptiveFields',cmap=None,padding=0.01):
    # Receptive Fields Summary
    W = layer.W
    wp = W.eval().transpose();
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
    else:           # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    fieldsN = min(fields.shape[0],maxFields)
    perRow = int(math.floor(math.sqrt(fieldsN)))
    perColumn = int(math.ceil(fieldsN/float(perRow)))

    fig = mpl.figure(figName); mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
    for i in range(0,fieldsN):
        im = grid[i].imshow(fields[i],cmap=cmap);

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    # old way
    # fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    # tiled = []
    # for i in range(0,perColumn*perRow,perColumn):
    #   tiled.append(np.hstack(fields2[i:i+perColumn]))
    #
    # tiled = np.vstack(tiled)
    # mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
    mpl.figure(figName+' Total'); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
Project: IntroToDeepLearning    Author: robb-brown    | project source | file source
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
    # Output summary
    W = layer.output
    wp = W.eval(feed_dict=feed_dict);
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        temp = np.zeros(np.prod(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
        fields = np.reshape(temp,[1]+fieldShape)
    else:           # Convolutional layer already has shape
        wp = np.rollaxis(wp,3,0)
        features, channels, iy,ix = np.shape(wp)
        if channel is not None:
            fields = wp[:,channel,:,:]
        else:
            fields = np.reshape(wp,[features*channels,iy,ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
    fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0,perColumn*perRow,perColumn):
        tiled.append(np.hstack(fields2[i:i+perColumn]))

    tiled = np.vstack(tiled)
    if figOffset is not None:
        mpl.figure(figOffset); mpl.clf();

    mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
Project: FaceSwap    Author: Aravind-Suresh    | project source | file source
def get_tm_opp(pts1, pts2):
    # Transformation matrix - ( Translation + Scaling + Rotation )
    # using Procrustes analysis
    pts1 = np.float64(pts1)
    pts2 = np.float64(pts2)

    m1 = np.mean(pts1, axis = 0)
    m2 = np.mean(pts2, axis = 0)

    # Removing translation
    pts1 -= m1
    pts2 -= m2

    std1 = np.std(pts1)
    std2 = np.std(pts2)
    std_r = std2/std1

    # Removing scaling
    pts1 /= std1
    pts2 /= std2

    U, S, V = np.linalg.svd(np.transpose(pts1) * pts2)

    # Finding the rotation matrix
    R = np.transpose(U * V)

    return np.vstack([np.hstack((std_r * R,
        np.transpose(m2) - std_r * R * np.transpose(m1))), np.matrix([0.0, 0.0, 1.0])])
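The return statement assembles a 3x3 homogeneous transform from blocks: hstack glues the 2x2 scaled rotation to the 2x1 translation column, and vstack appends the constant [0, 0, 1] row. With np.matrix inputs, as this function assumes (it uses * for matrix multiplication):

import numpy as np

R = np.matrix([[0.0, -1.0], [1.0, 0.0]])   # 90-degree rotation
t = np.matrix([[2.0], [3.0]])              # translation column vector

M = np.vstack([np.hstack((R, t)), np.matrix([0.0, 0.0, 1.0])])
print(M)
# [[ 0. -1.  2.]
#  [ 1.  0.  3.]
#  [ 0.  0.  1.]]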