Python numpy module: column_stack() example source code

We extracted the following code examples from open-source Python projects to illustrate how to use numpy.column_stack().
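
As a quick refresher before the project examples: given 1-D inputs, column_stack stacks the arrays as columns of a 2-D array (2-D inputs are concatenated along their second axis, like hstack). A minimal sketch:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

np.column_stack((a, b))
# array([[1, 4],
#        [2, 5],
#        [3, 6]])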

Project: ensemble_amazon    Author: kaz-Anova    | project source | file source
import numpy as np

def Make_3way(X, Xt):
    columns_length = X.shape[1]
    for j in range(columns_length):
        for d in range(j+1, columns_length):
            print("Adding columns' interaction %d and %d" % (j, d))
            new_column_train = X[:, j] + X[:, d]
            new_column_test = Xt[:, j] + Xt[:, d]
            X = np.column_stack((X, new_column_train))
            Xt = np.column_stack((Xt, new_column_test))
    for j in range(columns_length):
        for d in range(j+1, columns_length):
            for m in range(d+1, columns_length):
                print("Adding columns' interaction %d and %d and %d" % (j, d, m))
                new_column_train = X[:, j] + X[:, d] + X[:, m]
                new_column_test = Xt[:, j] + Xt[:, d] + Xt[:, m]
                X = np.column_stack((X, new_column_train))
                Xt = np.column_stack((Xt, new_column_test))

    return X, Xt
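
A hypothetical call: for k original columns, Make_3way appends all C(k,2) pairwise and C(k,3) three-way column sums to both the train and test matrices, so a 3-column input grows to 3 + 3 + 1 = 7 columns.

import numpy as np

X = np.arange(12.0).reshape(4, 3)   # hypothetical train matrix, k=3 columns
Xt = np.arange(6.0).reshape(2, 3)   # hypothetical test matrix
X, Xt = Make_3way(X, Xt)
print(X.shape, Xt.shape)            # (4, 7) (2, 7)
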
Project: sand-glyphs    Author: inconvergent    | project source | file source
# imports assumed at module level in the original project
from numpy import array, column_stack, cos, logical_not, pi, reshape, sin, zeros
from numpy.random import random

TWOPI = 2.0*pi

def random_points_in_circle(n,xx,yy,rr):
  """
  Get n points uniformly distributed in a circle of radius rr
  centered at (xx, yy).
  """

  rnd = random(size=(n,3))
  t = TWOPI*rnd[:,0]
  u = rnd[:,1:].sum(axis=1)
  r = zeros(n,'float')
  # fold u = u1 + u2 back into [0, 1]; the resulting radius has density 2r,
  # which makes the points uniform over the disk
  mask = u>1.
  xmask = logical_not(mask)
  r[mask] = 2.-u[mask]
  r[xmask] = u[xmask]
  xyp = reshape(rr*r,(n,1))*column_stack( (cos(t),sin(t)) )
  dartsxy  = xyp + array([xx,yy])
  return dartsxy
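
A hypothetical call:

pts = random_points_in_circle(1000, 0.5, 0.5, 0.25)
print(pts.shape)   # (1000, 2) array of xy coordinates inside the circle
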
Project: SourceFilterContoursMelody    Author: juanjobosch    | project source | file source
def MEFromSFFile(fn, outputfile, options):
    """ Computes Melody extractino from a Salience function File
        Parameters
    ----------
    fn: salience function filename
    outputfile: output filename
    options: set of options for melody extraction

    No returns

    """
    from numpy import column_stack, savetxt

    times, SF = loadSFFile(fn)
    times, pitch = MEFromSF(times, SF, options)
    savetxt(outputfile, column_stack((times.T, pitch.T)), fmt='%-7.5f', delimiter=",")
Project: higlass-server    Author: hms-dbmi    | project source | file source
def loci(request):
    chrom = request.GET.get('chrom', False)
    loop_list = request.GET.get('loop-list', False)

    # Get relative loci
    (loci_rel, chroms) = get_intra_chr_loops_from_looplist(
        path.join('data', loop_list), chrom
    )

    loci_rel_chroms = np.column_stack(
        (chroms[:, 0], loci_rel[:, 0:2], chroms[:, 1], loci_rel[:, 2:4])
    )

    # Create results
    results = {
        'loci': rel_loci_2_obj(loci_rel_chroms)
    }

    return JsonResponse(results)
Project: atoolbox    Author: liweitianux    | project source | file source
def sky(self):
        """
        OSKAR sky model array converted from the input image.

        Columns
        -------
        ra : (J2000) right ascension (deg)
        dec : (J2000) declination (deg)
        flux : source (Stokes I) flux density (Jy)
        """
        idx = self.mask.flatten()
        ra, dec = self.ra_dec
        ra = ra.flatten()[idx]
        dec = dec.flatten()[idx]
        flux = self.image.flatten()[idx] * self.factor_K2JyPixel
        sky_ = np.column_stack([ra, dec, flux])
        return sky_
Project: pauvre    Author: conchoecia    | project source | file source
def plotArc(start_angle, stop_angle, radius, width, **kwargs):
    """ write a docstring for this function"""
    numsegments = 100
    theta = np.radians(np.linspace(start_angle+90, stop_angle+90, numsegments))
    centerx = 0
    centery = 0
    x1 = -np.cos(theta) * (radius)
    y1 = np.sin(theta) * (radius)
    stack1 = np.column_stack([x1, y1])
    x2 = -np.cos(theta) * (radius + width)
    y2 = np.sin(theta) *  (radius + width)
    stack2 = np.column_stack([np.flip(x2, axis=0), np.flip(y2,axis=0)])
    # close the polygon by appending the first point of the inner edge
    # (np.append returns a new array; the result must be reassigned)
    stack2 = np.append(stack2, [[x1[0], y1[0]]], axis=0)
    arcArray = np.concatenate((stack1,stack2), axis=0)
    return patches.Polygon(arcArray, True, **kwargs), ((x1, y1), (x2, y2))
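
A minimal usage sketch, assuming the original module imports matplotlib.patches as patches and numpy as np; extra keyword arguments are passed through to the Polygon patch:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
arc, edges = plotArc(0, 120, radius=1.0, width=0.2, color="steelblue")
ax.add_patch(arc)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
ax.set_aspect("equal")
plt.show()
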
Project: Parallel.GAMIT    Author: demiangomez    | project source | file source
def ct2lg(dX, dY, dZ, lat, lon):

    n = dX.size
    R = np.zeros((3, 3, n))

    R[0, 0, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[0, 1, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[0, 2, :] = np.cos(np.deg2rad(lat))
    R[1, 0, :] = -np.sin(np.deg2rad(lon))
    R[1, 1, :] = np.cos(np.deg2rad(lon))
    R[1, 2, :] = np.zeros((1, n))
    R[2, 0, :] = np.multiply(np.cos(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[2, 1, :] = np.multiply(np.cos(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[2, 2, :] = np.sin(np.deg2rad(lat))

    dxdydz = np.column_stack((np.column_stack((dX, dY)), dZ))

    RR = np.reshape(R[0, :, :], (3, n))
    dx = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[1, :, :], (3, n))
    dy = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[2, :, :], (3, n))
    dz = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)

    return dx, dy, dz
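
R holds one 3x3 rotation matrix per station, built from geodetic latitude and longitude in degrees; the returned components are the local north, east and up projections of the ECEF differences dX, dY, dZ. A hypothetical single-station call:

import numpy as np

dX, dY, dZ = np.array([0.010]), np.array([0.020]), np.array([0.005])
dn, de, du = ct2lg(dX, dY, dZ, np.array([-31.0]), np.array([-64.0]))
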
Project: Parallel.GAMIT    Author: demiangomez    | project source | file source
def __init__(self, cnn=None, NetworkCode=None, StationCode=None, tref=0, t=None):

        self.c = np.array([])
        self.v = np.array([])

        if t is None:
            ppp_soln = PPP_soln(cnn, NetworkCode, StationCode)
            t = ppp_soln.t

        # t ref (just the beginning of t vector)
        if tref == 0:
            tref = np.min(t)

        # offset
        c = np.ones((t.size, 1))

        # velocity
        v = (t - tref)

        self.A = np.column_stack((c, v))
        self.tref = tref
Project: Parallel.GAMIT    Author: demiangomez    | project source | file source
def ct2lg(self, dX, dY, dZ, lat, lon):

        n = dX.size
        R = numpy.zeros((3, 3, n))

        R[0, 0, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[0, 1, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[0, 2, :] = numpy.cos(numpy.deg2rad(lat))
        R[1, 0, :] = -numpy.sin(numpy.deg2rad(lon))
        R[1, 1, :] = numpy.cos(numpy.deg2rad(lon))
        R[1, 2, :] = numpy.zeros((1, n))
        R[2, 0, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[2, 1, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[2, 2, :] = numpy.sin(numpy.deg2rad(lat))

        dxdydz = numpy.column_stack((numpy.column_stack((dX, dY)), dZ))

        RR = numpy.reshape(R[0, :, :], (3, n))
        dx = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[1, :, :], (3, n))
        dy = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[2, :, :], (3, n))
        dz = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)

        return dx, dy, dz
Project: Parallel.GAMIT    Author: demiangomez    | project source | file source
def __init__(self, cnn=None, NetworkCode=None, StationCode=None, tref=0, t=None):

        self.values = np.array([])

        if t is None:
            ppp_soln = PPP_soln(cnn, NetworkCode, StationCode)
            t = ppp_soln.t

        # t ref (just the beginning of t vector)
        if tref == 0:
            tref = np.min(t)

        # offset
        c = np.ones((t.size, 1))

        # velocity
        v = (t - tref)

        self.A = np.column_stack((c, v))
        self.tref = tref
        self.params = 2
Project: Parallel.GAMIT    Author: demiangomez    | project source | file source
def __call__(self, ts=None, constrains=False):

        if ts is None:
            if constrains:
                if self.J.constrains.size:
                    A = np.resize(self, (self.shape[0] + self.J.constrains.shape[0], self.shape[1]))
                    A[-self.J.constrains.shape[0] - 1:-1,self.L.params:self.L.params + self.J.params] = self.J.constrains
                    return A

                else:
                    return self

            else:
                return self

        else:
            Al = self.L.GetDesignTs(ts)
            Aj = self.J.GetDesignTs(ts)
            Ap = self.P.GetDesignTs(ts)

            As = np.column_stack((Al, Aj)) if Aj.size else Al
            As = np.column_stack((As, Ap)) if Ap.size else As

            return As
Project: plotnine    Author: has2k1    | project source | file source
def interleave(*arrays):
    """
    Interleave arrays

    All arrays/lists must be the same length

    Parameters
    ----------
    arrays : tuple
        2 or more arrays to interleave

    Returns
    -------
    out : np.ndarray
        Result of interleaving the input arrays
    """
    return np.column_stack(arrays).ravel()
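
For example:

import numpy as np

interleave(np.array([0, 2, 4]), np.array([1, 3, 5]))
# array([0, 1, 2, 3, 4, 5])
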
Project: iutils    Author: inconvergent    | project source | file source
def show(render, l, grains):

  from numpy import column_stack

  render.set_front([1,1,1,0.05])

  render.set_line_width(render.pix)

  sandstroke = render.sandstroke

  points = column_stack([
    l.xy[:l.itt,0] - l.w[:l.itt],
    l.xy[:l.itt,1] - 0,
    l.xy[:l.itt,0] + l.w[:l.itt],
    l.xy[:l.itt,1] - 0,
  ])

  sandstroke(points, grains)
Project: iutils    Author: inconvergent    | project source | file source
def random_points_in_circle(n,xx,yy,rr):
  """
  Get n points uniformly distributed in a circle of radius rr
  centered at (xx, yy).
  """

  rnd = random(size=(n,3))
  t = 2.*PI*rnd[:,0]
  u = rnd[:,1:].sum(axis=1)
  r = zeros(n,'float')
  mask = u>1.
  xmask = logical_not(mask)
  r[mask] = 2.-u[mask]
  r[xmask] = u[xmask]
  xyp = reshape(rr*r,(n,1))*column_stack( (cos(t),sin(t)) )
  dartsxy  = xyp + array([xx,yy])
  return dartsxy
Project: iutils    Author: inconvergent    | project source | file source
def sandstroke_non_linear(self,xys,grains=10,left=True):

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    dx = xys[:,2] - xys[:,0]
    dy = xys[:,3] - xys[:,1]

    aa = arctan2(dy,dx)
    directions = column_stack([cos(aa),sin(aa)])

    dd = sqrt(square(dx)+square(dy))

    for i,d in enumerate(dd):
      rnd = sqrt(random((grains,1)))
      if left:
        rnd = 1.0-rnd

      for x,y in xys[i,:2] + directions[i,:]*rnd*d:
        rectangle(x,y,pix,pix)
        fill()
Project: iutils    Author: inconvergent    | project source | file source
def sandstroke(self,xys,grains=10):

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    dx = xys[:,2] - xys[:,0]
    dy = xys[:,3] - xys[:,1]

    aa = arctan2(dy,dx)
    directions = column_stack([cos(aa),sin(aa)])

    dd = sqrt(square(dx)+square(dy))

    for i,d in enumerate(dd):
      for x,y in xys[i,:2] + directions[i,:]*random((grains,1))*d:
        rectangle(x,y,pix,pix)
        fill()
Project: paragraph2vec    Author: thunlp    | project source | file source
def corpus2dense(corpus, num_terms, num_docs=None, dtype=numpy.float32):
    """
    Convert corpus into a dense numpy array (documents will be columns). You
    must supply the number of features `num_terms`, because dimensionality
    cannot be deduced from the sparse vectors alone.

    You can optionally supply `num_docs` (=the corpus length) as well, so that
    a more memory-efficient code path is taken.

    This is the mirror function to `Dense2Corpus`.

    """
    if num_docs is not None:
        # we know the number of documents => don't bother column_stacking
        docno, result = -1, numpy.empty((num_terms, num_docs), dtype=dtype)
        for docno, doc in enumerate(corpus):
            result[:, docno] = sparse2full(doc, num_terms)
        assert docno + 1 == num_docs
    else:
        result = numpy.column_stack([sparse2full(doc, num_terms) for doc in corpus])
    return result.astype(dtype)
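
A minimal sketch with a hypothetical two-document bag-of-words corpus; `sparse2full` is the companion helper from gensim's matutils module, where this function originates:

corpus = [[(0, 1.0), (2, 3.0)],
          [(1, 2.0)]]
dense = corpus2dense(corpus, num_terms=4, num_docs=2)
print(dense.shape)   # (4, 2): documents are columns
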
Project: aspect_adversarial    Author: yuanzh    | project source | file source
def create_input(data, padding_id):
    doc_length = [len(d[0]) for d in data]
    sent_length = [len(x) for d in data for x in d[0]]
    if len(sent_length) == 0: sent_length.append(0)

    max_doc_len = max(1, max(doc_length))
    max_sent_len = max(1, max(sent_length))

    idxs = np.column_stack(
            [create_doc_array(d, padding_id, max_doc_len, max_sent_len).ravel() for d in data]
            )
    idxs = idxs.reshape(max_sent_len, max_doc_len, len(data))
    idys = np.array([d[1] for d in data], dtype="int32")

    # relevance
    gold_rels = np.column_stack([np.array([REL_PAD] * (max_doc_len-len(d[2])) + d[2], dtype="int32") for d in data])
    assert gold_rels.shape == (max_doc_len, len(data))

    for d in data: assert len(d[2]) == len(d[0])
    input_lst = [idxs, idys, gold_rels]

    return input_lst
Project: facade-segmentation    Author: jfemiani    | project source | file source
def _vlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160, ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])

    angles = np.degrees(np.arccos(vecs[:, 0] / lengths))
    points = np.column_stack([ctrs[:, 0], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines
Project: facade-segmentation    Author: jfemiani    | project source | file source
def _hlines(lines, ctrs=None, lengths=None, vecs=None, angle_lo=20, angle_hi=160, ransac_options=RANSAC_OPTIONS):
    ctrs = ctrs if ctrs is not None else lines.mean(1)
    vecs = vecs if vecs is not None else lines[:, 1, :] - lines[:, 0, :]
    lengths = lengths if lengths is not None else np.hypot(vecs[:, 0], vecs[:, 1])

    angles = np.degrees(np.arccos(vecs[:, 1] / lengths))
    points = np.column_stack([ctrs[:, 1], angles])
    point_indices, = np.nonzero((angles > angle_lo) & (angles < angle_hi))
    points = points[point_indices]
    if len(points) > 2:
        model_ransac = linear_model.RANSACRegressor(**ransac_options)
        model_ransac.fit(points[:, 0].reshape(-1, 1), points[:, 1].reshape(-1, 1))
        inlier_mask = model_ransac.inlier_mask_
        valid_lines = lines[point_indices[inlier_mask], :, :]
    else:
        valid_lines = []
    return valid_lines
Project: quadpy    Author: nschloe    | project source | file source
def plot(scheme, interval=numpy.array([[-1.0], [1.0]]), show_axes=False):
    # change default range so that new disks will work
    plt.axis('equal')
    # ax.set_xlim((-1.5, 1.5))
    # ax.set_ylim((-1.5, 1.5))

    if not show_axes:
        plt.gca().set_axis_off()

    plt.plot(interval, [0, 0], color='k')

    pts = numpy.column_stack([scheme.points, numpy.zeros(len(scheme.points))])

    total_area = interval[1] - interval[0]
    helpers.plot_disks_1d(plt, pts, scheme.weights, total_area)
    return
Project: quadpy    Author: nschloe    | project source | file source
def __init__(self, m):
        k = 4*m + 3
        self.degree = k
        theta = 2*numpy.pi * numpy.arange(1, k+2) / (k+1)
        p, w = numpy.polynomial.legendre.leggauss(m+1)
        # scale points to [r0, r1] (where r0 = 0, r1 = 1 for now)
        p = numpy.sqrt(0.5*(p + 1.0))
        p_theta = numpy.dstack(numpy.meshgrid(p, theta)).reshape(-1, 2).T
        self.points = numpy.column_stack([
            p_theta[0] * numpy.cos(p_theta[1]),
            p_theta[0] * numpy.sin(p_theta[1]),
            ])

        # When integrating between 0 and 1, the weights are exactly the
        # Gauss-Legendre weights, scaled according to the disk area.
        self.weights = numpy.tile(0.5 * numpy.pi / (k+1) * w, k+1)
        return
Project: nanopores    Author: mitschabaude    | project source | file source
def move_ellipses(self, coll, cyl=False):
        xz = self.x[:, ::2] if not cyl else np.column_stack(
           [np.sqrt(np.sum(self.x[:, :2]**2, 1)), self.x[:, 2]])
        coll.set_offsets(xz)
        #inside = self.inside_wall()
        #margin = np.nonzero(self.alive)[0][self.inside_wall(2.)]
        colors = np.full((self.N,), "b", dtype=str)
        #colors[margin] = "r"
        colors[self.success] = "k"
        colors[self.fail] = "k"
        colors[self.alive & ~self.can_bind] = "r"
        #colors = [("r" if inside[i] else "g") if margin[i] else "b" for i in range(self.N)]
        coll.set_facecolors(colors)
        #y = self.x[:, 1]
        #d = 50.
        #sizes = self.params.rMolecule*(1. + y/d)
        #coll.set(widths=sizes, heights=sizes)
Project: deep_learning_study    Author: jowettcz    | project source | file source
def get_training_data():
    dict = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(1))
    images = dict[b'data']
    labels = dict[b'labels']
    filenames = dict[b'filenames']

    for i in range(2, 5):
        idict = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(i))

        iimages = idict[b'data']
        images = np.row_stack((images, iimages))

        # labels and filenames are 1-D per batch; concatenating keeps them
        # aligned with the row-stacked image matrix
        ilabels = idict[b'labels']
        labels = np.concatenate((labels, ilabels))

        ifilenames = idict[b'filenames']
        filenames = np.concatenate((filenames, ifilenames))

    return {b'batch_label': 'training batch,40000*3072', b'data': images,
            b'labels': labels, b'filenames': filenames}
Project: geoviews    Author: ioam    | project source | file source
def _process_element(self, element):
        if element.interface.datatype == 'geodataframe':
            geoms = element.split(datatype='geom')
            projected = [self.p.projection.project_geometry(geom, element.crs)
                         for geom in geoms]
            new_data = element.data.copy()
            new_data['geometry'] = projected
            return element.clone(new_data, crs=self.p.projection)

        geom_type = Polygon if isinstance(element, Polygons) else LineString
        xdim, ydim = element.kdims[:2]
        projected = []
        for geom in element.split(datatype='columns'):
            xs, ys = geom[xdim.name], geom[ydim.name]
            path = geom_type(np.column_stack([xs, ys]))
            proj = self.p.projection.project_geometry(path, element.crs)
            proj_arr = geom_to_array(proj)
            geom[xdim.name] = proj_arr[:, 0]
            geom[ydim.name] = proj_arr[:, 1]
            projected.append(geom)
        return element.clone(projected, crs=self.p.projection)
Project: pslab-desktop-apps    Author: fossasia    | project source | file source
def saveData(self):
        try:
            os.mkdir(self.savedir)
        except OSError:
            print('directory exists. overwriting')
        print('saving to', self.savedir)

        if self.calibrateOnlyADC: # create ideal dataset for PV1, PV2
            np.savetxt(os.path.join(self.savedir,'PV1_ERR.csv'),np.column_stack([np.linspace(-5,5,4096),np.linspace(-5,5,4096) ]))
            np.savetxt(os.path.join(self.savedir,'PV2_ERR.csv'),np.column_stack([np.linspace(-3.3,3.3,4096),np.linspace(-3.3,3.3,4096) ]))
        else:
            np.savetxt(os.path.join(self.savedir,'PV1_ERR.csv'),np.column_stack([self.A.ADC24['AIN5'],self.A.DAC_VALS['PV1'] ]))
            np.savetxt(os.path.join(self.savedir,'PV2_ERR.csv'),np.column_stack([self.A.ADC24['AIN6'],self.A.DAC_VALS['PV2'] ]))

        np.savetxt(os.path.join(self.savedir,'PV3_ERR.csv'),np.column_stack([self.A.ADC24['AIN7'],self.A.DAC_VALS['PV3'] ]))


        np.savetxt(os.path.join(self.savedir,'CALIB_INL.csv'),np.column_stack([self.A.ADC24['AIN7'],self.A.ADCPIC_INL]))
        for a in self.INPUTS:
            if self.I.analogInputSources[a].gainEnabled:
                for b in range(8):
                    raw=self.A.ADC_VALUES[a][b]
                    np.savetxt(os.path.join(self.savedir,'CALIB_%s_%dx.csv'%(a,self.I.gain_values[b])),np.column_stack([np.array(self.A.ADC24['AIN6'])[self.A.ADC_ACTUALS[a][b]],raw]))
            else:
                np.savetxt(os.path.join(self.savedir,'CALIB_%s_%dx.csv'%(a,1)),np.column_stack([np.array(self.A.ADC24['AIN6'])[self.A.ADC_ACTUALS[a][0]],self.A.ADC_VALUES[a][0]]))
Project: orange3-educational    Author: biolab    | project source | file source
def concat_x_y(self):
        """
        Function takes two selected columns from data table and merge them in
        new Orange.data.Table

        Returns
        -------
        Orange.data.Table
            table with selected columns
        """
        attr_x = self.data.domain[self.attr_x]
        attr_y = self.data.domain[self.attr_y]
        cols = []
        for attr in (attr_x, attr_y):
            subset = self.data[:, attr]
            cols.append(subset.Y if subset.Y.size else subset.X)
        x = np.column_stack(cols)
        not_nan = ~np.isnan(x).any(axis=1)
        x = x[not_nan]  # remove rows with nan
        self.selected_rows = np.where(not_nan)
        domain = Domain([attr_x, attr_y])
        return Table(domain, x)
Project: semi-auto-anno    Author: moberweger    | project source | file source
def depthToPCL(dpt, T, background_val=0.):

        # get valid points and transform
        pts = np.asarray(np.where(~np.isclose(dpt, background_val))).transpose()
        pts = np.concatenate([pts[:, [1, 0]], np.ones((pts.shape[0], 1), dtype='float32')], axis=1)
        pts = np.dot(np.linalg.inv(np.asarray(T)), pts.T).T
        pts = (pts[:, 0:2] / pts[:, 2][:, None]).reshape((pts.shape[0], 2))

        # replace the invalid data
        depth = dpt[np.where(~np.isclose(dpt, background_val))]

        # back-project pixel coordinates to metric x/y using the camera
        # focal length (241.42 px) and principal point (160, 120)
        row = (pts[:, 0] - 160.) / 241.42 * depth
        col = (pts[:, 1] - 120.) / 241.42 * depth

        # combine x,y,depth
        return np.column_stack((row, col, depth))
Project: semi-auto-anno    Author: moberweger    | project source | file source
def depthToPCL(dpt, T, background_val=0.):

        # get valid points and transform
        pts = np.asarray(np.where(~np.isclose(dpt, background_val))).transpose()
        pts = np.concatenate([pts[:, [1, 0]], np.ones((pts.shape[0], 1), dtype='float32')], axis=1)
        pts = np.dot(np.linalg.inv(np.asarray(T)), pts.T).T
        pts = (pts[:, 0:2] / pts[:, 2][:, None]).reshape((pts.shape[0], 2))

        # replace the invalid data
        depth = dpt[np.where(~np.isclose(dpt, background_val))]

        # back-project pixel coordinates to metric x/y using the camera
        # focal length (460 px) and principal point (320, 240)
        row = (pts[:, 0] - 320.) / 460. * depth
        col = (pts[:, 1] - 240.) / 460. * depth

        # combine x,y,depth
        return np.column_stack((row, col, depth))
Project: tinyml    Author: parasdahal    | project source | file source
def predict(self, data, prob=False):
        """Computes the logistic probability of being a positive example

        Parameters
        ----------
        data : ndarray (n-rows,n-features)
            Test data to score using the current weights
        prob : Boolean
            If set to true, probability will be returned, else binary classification
        Returns
        -------
        0 or 1 : int
            0 if the probability is less than 0.5, else 1 (the raw
            probability is returned when `prob` is True)
        """
        data = np.column_stack((np.ones(data.shape[0]), data))

        hypothesis = LogisticRegression.sigmoid(np.dot(data, self.theta))
        if not prob:
            return np.where(hypothesis >= .5, 1, 0)
        return hypothesis
Project: sl-quant    Author: danielzak    | project source | file source
def init_state(indata, test=False):
    close = indata['close'].values
    diff = np.diff(close)
    diff = np.insert(diff, 0, 0)
    sma15 = SMA(indata, timeperiod=15)
    sma60 = SMA(indata, timeperiod=60)
    rsi = RSI(indata, timeperiod=14)
    atr = ATR(indata, timeperiod=14)

    #--- Preprocess data
    xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))

    xdata = np.nan_to_num(xdata)
    if not test:
        scaler = preprocessing.StandardScaler()
        xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
        joblib.dump(scaler, 'data/scaler.pkl')
    else:
        scaler = joblib.load('data/scaler.pkl')
        # reuse the fitted scaler at test time: transform, don't refit
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
    state = xdata[0:1, 0:1, :]

    return state, xdata, close

Project: sl-quant    Author: danielzak    | project source | file source
def init_state(data):

    close = data
    diff = np.diff(data)
    diff = np.insert(diff, 0, 0)

    #--- Preprocess data
    xdata = np.column_stack((close, diff))
    xdata = np.nan_to_num(xdata)
    scaler = preprocessing.StandardScaler()
    xdata = scaler.fit_transform(xdata)

    state = xdata[0:1, :]
    return state, xdata

Project: topical_word_embeddings    Author: thunlp    | project source | file source
def corpus2dense(corpus, num_terms, num_docs=None, dtype=numpy.float32):
    """
    Convert corpus into a dense numpy array (documents will be columns). You
    must supply the number of features `num_terms`, because dimensionality
    cannot be deduced from the sparse vectors alone.

    You can optionally supply `num_docs` (=the corpus length) as well, so that
    a more memory-efficient code path is taken.

    This is the mirror function to `Dense2Corpus`.

    """
    if num_docs is not None:
        # we know the number of documents => don't bother column_stacking
        docno, result = -1, numpy.empty((num_terms, num_docs), dtype=dtype)
        for docno, doc in enumerate(corpus):
            result[:, docno] = sparse2full(doc, num_terms)
        assert docno + 1 == num_docs
    else:
        result = numpy.column_stack([sparse2full(doc, num_terms) for doc in corpus])
    return result.astype(dtype)
Project: tensorflow_end2end_speech_recognition    Author: hirofumi0810    | project source | file source
def plot_loss(train_losses, dev_losses, steps, save_path):
    """Save history of training & dev loss as figure.
    Args:
        train_losses (list): train losses
        dev_losses (list): dev losses
        steps (list): steps
        save_path (string): directory where the csv and png files are saved
    """
    # Save as csv file
    loss_graph = np.column_stack((steps, train_losses, dev_losses))
    if os.path.isfile(os.path.join(save_path, "loss.csv")):
        os.remove(os.path.join(save_path, "loss.csv"))
    np.savetxt(os.path.join(save_path, "loss.csv"), loss_graph, delimiter=",")

    # TODO: error check for inf loss

    # Plot & save as png file
    plt.clf()
    # color constants were defined elsewhere in the original module;
    # plain matplotlib color names are assumed here
    plt.plot(steps, train_losses, color='blue', label="Train")
    plt.plot(steps, dev_losses, color='orange', label="Dev")
    plt.xlabel('step', fontsize=12)
    plt.ylabel('loss', fontsize=12)
    plt.legend(loc="upper right", fontsize=12)
    if os.path.isfile(os.path.join(save_path, "loss.png")):
        os.remove(os.path.join(save_path, "loss.png"))
    plt.savefig(os.path.join(save_path, "loss.png"), dpi=500)
Project: orange3-timeseries    Author: biolab    | project source | file source
def _predict_as_table(self, prediction, confidence):
        from Orange.data import Domain, ContinuousVariable
        means, lows, highs = [], [], []
        n_vars = prediction.shape[2] if len(prediction.shape) > 2 else 1
        for i, name in zip(range(n_vars),
                           self._table_var_names or range(n_vars)):
            mean = ContinuousVariable('{} (forecast)'.format(name))
            low = ContinuousVariable('{} ({:d}%CI low)'.format(name, confidence))
            high = ContinuousVariable('{} ({:d}%CI high)'.format(name, confidence))
            low.ci_percent = high.ci_percent = confidence
            mean.ci_attrs = (low, high)
            means.append(mean)
            lows.append(low)
            highs.append(high)
        domain = Domain(means + lows + highs)
        X = np.column_stack(prediction)
        table = Timeseries.from_numpy(domain, X)
        table.name = (self._table_name or '') + '({} forecast)'.format(self)
        return table
Project: elfi    Author: elfi-dev    | project source | file source
def distance_as_discrepancy(dist, *summaries, observed):
    """Evaluate a distance function with signature `dist(summaries, observed)` in ELFI."""
    summaries = np.column_stack(summaries)
    # Ensure observed are 2d
    observed = np.concatenate([np.atleast_2d(o) for o in observed], axis=1)
    try:
        d = dist(summaries, observed)
    except ValueError as e:
        raise ValueError('Incompatible data shape for the distance node. Please check '
                         'summary (XA) and observed (XB) output data dimensions. They '
                         'have to be at most 2d. Especially ensure that summary nodes '
                         'outputs 2d data even with batch_size=1. Original error message '
                         'was: {}'.format(e))
    if d.ndim == 2 and d.shape[1] == 1:
        d = d.reshape(-1)
    return d
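
A minimal sketch using a Euclidean distance and hypothetical summary values (one value per simulation in a batch of three; `observed` holds the observed value of each summary):

import numpy as np

s1 = np.array([1.0, 2.0, 3.0])            # summary 1, batch of 3 simulations
s2 = np.array([0.0, 1.0, 0.0])            # summary 2, same batch
obs = (np.array([1.5]), np.array([0.5]))  # observed value of each summary
d = distance_as_discrepancy(
    lambda XA, XB: np.linalg.norm(XA - XB, axis=1), s1, s2, observed=obs)
print(d)   # one distance per simulation
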
Project: elfi    Author: elfi-dev    | project source | file source
def _compute_weights_and_cov(self, pop):
        params = np.column_stack([pop.outputs[p] for p in self.parameter_names])

        if self._populations:
            q_logpdf = GMDistribution.logpdf(params, *self._gm_params)
            p_logpdf = self._prior.logpdf(params)
            w = np.exp(p_logpdf - q_logpdf)
        else:
            w = np.ones(pop.n_samples)

        if np.count_nonzero(w) == 0:
            raise RuntimeError("All sample weights are zero. If you are using a prior "
                               "with a bounded support, this may be caused by specifying "
                               "a too small sample size.")

        # New covariance
        cov = 2 * np.diag(weighted_var(params, w))

        if not np.all(np.isfinite(cov)):
            logger.warning("Could not estimate the sample covariance. This is often "
                           "caused by majority of the sample weights becoming zero. "
                           "Falling back to using unit covariance.")
            cov = np.diag(np.ones(params.shape[1]))

        return w, cov
Project: yt    Author: yt-project    | project source | file source
def _initialize_index(self, data_file, regions):
        # self.fields[g.id][fname] is the pattern here
        morton = []
        for ptype in self.ds.particle_types_raw:
            try:
                pos = np.column_stack([self.fields[data_file.filename][
                    (ptype, "particle_position_%s" % ax)] for ax in 'xyz'])
            except KeyError:
                pos = self.fields[data_file.filename][ptype, "particle_position"]
            if np.any(pos.min(axis=0) < data_file.ds.domain_left_edge) or \
               np.any(pos.max(axis=0) > data_file.ds.domain_right_edge):
                raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
                                       data_file.ds.domain_left_edge,
                                       data_file.ds.domain_right_edge)
            regions.add_data_file(pos, data_file.file_id)
            morton.append(compute_morton(
                    pos[:,0], pos[:,1], pos[:,2],
                    data_file.ds.domain_left_edge,
                    data_file.ds.domain_right_edge))
        return np.concatenate(morton)
Project: yt    Author: yt-project    | project source | file source
def particle_vector_functions(ptype, coord_names, vel_names, registry):

    unit_system = registry.ds.unit_system

    # This will column_stack a set of scalars to create vector fields.

    def _get_vec_func(_ptype, names):
        def particle_vectors(field, data):
            v = [data[_ptype, name].in_units(field.units)
                  for name in names]
            c = np.column_stack(v)
            return data.apply_units(c, field.units)
        return particle_vectors
    registry.add_field((ptype, "particle_position"),
                       sampling_type="particle",
                       function=_get_vec_func(ptype, coord_names),
                       units = "code_length")
    registry.add_field((ptype, "particle_velocity"),
                       sampling_type="particle",
                       function=_get_vec_func(ptype, vel_names),
                       units = unit_system["velocity"])
Project: sand-spline    Author: inconvergent    | project source | file source
def __next__(self):
    try:
      g = next(self.guide)
    except Exception:
      raise StopIteration

    pnum = self.pnum

    r = 1.0-2.0*random(pnum)
    self.noise[:] += r*self.scale

    a = random(pnum)*TWOPI
    rnd = column_stack((cos(a), sin(a)))

    self.path += rnd * reshape(self.noise, (self.pnum,1))
    self.interpolated_path = _rnd_interpolate(self.path, self.inum, ordered=ORDERED)

    self.i += 1
    return g + self.interpolated_path
Project: AutoML5    Author: djajetic    | project source | file source
def predict_proba(self, X):
        if len(X.shape)==1: # IG modif Feb3 2015
            X = np.reshape(X,(-1,1))   
        prediction = self.predictors[0].predict_proba(X)
        if self.n_label==2:                 # Keep only 1 prediction, 1st column = (1 - 2nd column)
            prediction = prediction[:,1]
        for i in range(1,self.n_target): # More than 1 target, we assume that labels are binary
            new_prediction = self.predictors[i].predict_proba(X)[:,1]
            prediction = np.column_stack((prediction, new_prediction))
        return prediction
Project: MKLMM    Author: omerwe    | project source | file source
def sq_dist(a, b=None):
    # mean-center for numerical stability (note: modifies a and b in place)
    D, n = a.shape[0], a.shape[1]
    if (b is None):
        mu = a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b = a
        m = n
        aSq = np.sum(a**2, axis=0)
        bSq = aSq
    else:
        d, m = b.shape[0], b.shape[1]
        if (d != D): raise Exception('column lengths must agree')
        mu = (float(m)/float(m+n))*b.mean(axis=1) + (float(n)/float(m+n))*a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b -= mu[:, np.newaxis]      
        aSq = np.sum(a**2, axis=0)
        bSq = np.sum(b**2, axis=0)

    C = np.tile(np.column_stack(aSq).T, (1, m)) + np.tile(bSq, (n, 1)) - 2*a.T.dot(b)
    C = np.maximum(C, 0)    #remove numerical noise
    return C
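
Because the mean-centering above modifies the inputs in place, pass copies if the original arrays are still needed. A hypothetical call:

import numpy as np

a = np.random.randn(3, 5)   # 3 features, 5 points (points are columns)
C = sq_dist(a.copy())       # (5, 5) matrix of squared Euclidean distances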

Project: MKLMM    Author: omerwe    | project source | file source
def __init__(self, X, pos):
        Kernel.__init__(self)
        self.X_scaled = X/np.sqrt(X.shape[1])
        d = pos.shape[0]
        self.D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0