Python numpy module: row_stack() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.row_stack().
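
numpy.row_stack is an alias for numpy.vstack: it stacks arrays in sequence vertically (row-wise), treating a 1-D array of length N as a single row of shape (1, N). In recent NumPy releases the alias is deprecated in favor of vstack itself. A minimal sketch of the basic behavior:

import numpy as np

a = np.array([1, 2, 3])        # 1-D input, treated as a single row
b = np.array([[4, 5, 6],
              [7, 8, 9]])      # already 2-D

stacked = np.row_stack((a, b))
print(stacked.shape)                               # (3, 3)
print(np.array_equal(stacked, np.vstack((a, b))))  # True: row_stack is an alias of vstack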

Project: sand-glyphs | Author: inconvergent
def _get_glyph(gnum, height, width, shift_prob, shift_size):
  if isinstance(gnum, list):
    n = randint(*gnum)
  else:
    n = gnum

  glyph = random_points_in_circle(
      n, 0, 0, 0.5
      )*array((width, height), 'float')
  _spatial_sort(glyph)

  if random()<shift_prob:
    shift = ((-1)**randint(0,2))*shift_size*height
    glyph[:,1] += shift
  if random()<0.5:
    ii = randint(0,n-1,size=(1))
    xy = glyph[ii,:]
    glyph = row_stack((glyph, xy))

  return glyph
Project: Parallel.GAMIT | Author: demiangomez
def find_stable_a(self, A, cond_h, cond_s):

        # build the different combinations of
        # condition equations
        condeq = []
        if cond_h.size > 0:
            condeq.append(cond_h)
        condeq.append(cond_s)
        if cond_h.size > 0:
            condeq.append(numpy.row_stack((cond_s,cond_h)))

        condnum = []
        condnum.append(numpy.linalg.cond(A))

        for cond in condeq:
            condnum.append(numpy.linalg.cond(numpy.row_stack((A,cond))))

        i = numpy.argmin(numpy.array(condnum))

        if i == 0:
            return numpy.array([])
        else:
            return condeq[i-1]
Project: ExperimentPackage_PyTorch | Author: ICEORY
def getallweights(model):
    # get weights from model
    model_list = MD.model2list(model)
    weight_np = None
    for i in range(len(model_list)):
        model_state_dict = model_list[i].state_dict()
        for k, d in model_state_dict.items():
            k_split = k.split(".")
            if k_split[-1] == "weight":
                d_np = d.cpu().numpy()
                d_np = d_np.reshape(d_np.size, 1)
                if weight_np is None:
                    weight_np = d_np
                else:
                    weight_np = np.row_stack((weight_np, d_np))
    return weight_np
Project: deep_learning_study | Author: jowettcz
def get_training_data():
    dict = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(1))
    images = dict[b'data']
    labels = dict[b'labels']
    filenames = dict[b'filenames']

    for i in range(2, 5):
        idict = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(i))

        iimages = idict[b'data']
        images = np.row_stack((images, iimages))  # stack (10000, 3072) blocks row-wise

        ilabels = idict[b'labels']
        labels = np.concatenate((labels, ilabels))  # labels are 1-D, so join end-to-end

        ifilenames = idict[b'filenames']
        filenames = np.concatenate((filenames, ifilenames))

    return {b'batch_label': 'training batch,40000*3072', b'data': images, b'labels': labels, b'filenames': filenames}
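
row_stack produces the intended (40000, 3072) array above only because each data block is already 2-D; with 1-D inputs, row_stack, column_stack, and concatenate behave quite differently. A small illustration with made-up values:

import numpy as np

a = np.arange(3)        # shape (3,)
b = np.arange(3, 6)     # shape (3,)

print(np.row_stack((a, b)).shape)     # (2, 3): each 1-D input becomes a row
print(np.column_stack((a, b)).shape)  # (3, 2): each 1-D input becomes a column
print(np.concatenate((a, b)).shape)   # (6,): flat end-to-end join, no new axis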
Project: svg-sorter | Author: inconvergent
def main(args, **argv):

  from numpy import row_stack

  fn = args.fn
  out = args.out

  # w = 1000
  # h = 1000

  paths = get_lines_from_svg(fn, out)
  mi, ma, move = get_mid(row_stack(paths))
  paths, _ = spatial_sort(paths)
  paths = spatial_concat(paths)
  paths = align_left(paths, mi)

  w, h = ma - mi
  if args.svgwrite:
    export_svg_svgwrite(out, paths, w, h, line_width=1)
  else:
    export_svg(out, paths, w, h, line_width=1)

  # return
Project: elfi | Author: elfi-dev
def prepare_inputs(*inputs, **kwinputs):
    """Prepare the inputs for the simulator.

    The signature follows that given in `elfi.tools.external_operation`. This function
    appends kwinputs with unique and descriptive filenames and writes an input file for
    the bdm executable.
    """
    alpha, delta, tau, N = inputs
    meta = kwinputs['meta']

    # Organize the parameters to an array. The broadcasting works nicely with constant
    # arguments.
    param_array = np.row_stack(np.broadcast(alpha, delta, tau, N))

    # Prepare a unique filename for parallel settings
    filename = '{model_name}_{batch_index}_{submission_index}.txt'.format(**meta)
    np.savetxt(filename, param_array, fmt='%.4f %.4f %.4f %d')

    # Add the filenames to kwinputs
    kwinputs['filename'] = filename
    kwinputs['output_filename'] = filename[:-4] + '_out.txt'

    # Return new inputs that the command will receive
    return inputs, kwinputs
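
The np.row_stack(np.broadcast(...)) idiom above is a compact way to tabulate a mix of scalar and array parameters into one row per parameter combination. A small sketch with made-up values (not actual bdm parameters):

import numpy as np

alpha = np.array([0.1, 0.2, 0.3])  # a batch of parameter draws
delta, tau, N = 0.5, 1.0, 20       # constants, broadcast against alpha

param_array = np.row_stack(np.broadcast(alpha, delta, tau, N))
print(param_array.shape)  # (3, 4): one row per (alpha, delta, tau, N) tuple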
Project: starcraft-stacked-graph-service | Author: ibm-dev-incubator
def draw_matplot_graph(unit_supplies, replay):
    units = sorted(list(unit_supplies.keys()))
    y = NP.row_stack([ unit_supplies[i] for i in units ])
    # this call to 'cumsum' (cumulative sum), passing in your y data, 
    # is necessary to avoid having to manually order the datasets
    x = times
    y_stack = NP.cumsum(y, axis=0)   # a 3x10 array

    fig = PLT.figure()
    ax1 = fig.add_subplot(111)

    patches = []

    ax1.fill_between(x, 0, y_stack[0,:], facecolor="#CC6666", alpha=.7)
    patches.append(mpatches.Patch(color="#CC6666", label=units[0], alpha=.7))

    for index, key in enumerate(units[1:]):
        color = "#" +  hashlib.sha224(bytes(key, 'utf-8')).hexdigest()[:6]

        patches.append(mpatches.Patch(color=color, label=key, alpha=.7))
        ax1.fill_between(x, y_stack[index,:], y_stack[index+1,:], facecolor=color, alpha=.7)

    PLT.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0., handles=patches)
    PLT.show()
Project: mlprojects-py | Author: srinathperera
def fill_in_missing_dates(df, date_col_name, other_col):
    startd = df[date_col_name].values[0]
    endd = df[date_col_name].values[-1]
    print(startd, endd)
    idx = pd.date_range(startd, endd)

    dict = {}
    for index, row in df.iterrows():
        dict[row[date_col_name]] = row[other_col]

    new_data = []
    for d in idx:
        pydate = d.to_pydatetime()
        daskey = pydate.strftime('%Y-%m-%d')
        new_data.append([daskey, dict[daskey] if daskey in dict else None])

    return np.row_stack(new_data)
Project: mlprojects-py | Author: srinathperera
def fill_in_missing_dates(df, date_col_name, other_col):
    startd = df[date_col_name].values[0]
    endd = df[date_col_name].values[-1]
    print(startd, endd)
    idx = pd.date_range(startd, endd)

    dict = {}
    for index, row in df.iterrows():
        dict[row[date_col_name]] = row[other_col]

    new_data = []
    for d in idx:
        pydate = d.to_pydatetime()
        daskey = pydate.strftime('%Y-%m-%d')
        new_data.append([daskey, dict[daskey] if daskey in dict else 0])

    return np.row_stack(new_data)
Project: differential-lattice | Author: inconvergent
def link_export(self):

    from numpy import row_stack

    num = self.num
    links = self.links[:num*10, 0]

    edges = set()
    for i, c in enumerate(self.link_counts[:num,0]):
      for k in range(c):

        j = links[10*i+k]
        if i<j:
          lnk = (i, j)
        else:
          lnk = (j, i)

        if lnk not in edges:
          edges.add(lnk)

    return self.xy[:num,:], row_stack(list(edges))
Project: car-detection | Author: mmetcalfe
def lookAtTransform(pos, target, up, square=False, camera=False):
    pos = np.array(pos, np.float32)
    target = np.array(target, np.float32)
    up = np.array(up, np.float32)

    # print 'lookAtTransform:'
    dir = target - pos

    R = rotationFromVectors(dir, up, camera=camera)

    # print 'R:', R

    pos = np.matrix(pos).T

    V = np.column_stack((R, -R*pos))

    if square:
        V = np.row_stack((
            V,
            np.array([0,0,0,1], np.float32)
        ))

    return V
Project: car-detection | Author: mmetcalfe
def unprojectOpenGL(self, u):
        # K, R, t = camera.factor()

        # squareProj = np.row_stack((
        #     camera.P,
        #     np.array([0,0,0,1], np.float32)
        # ))
        # invProj = np.linalg.inv(squareProj)
        # x = invProj*np.row_stack([np.mat(u).T, [1]])
        # x = x[:3]

        # u = np.mat(u).T
        # x = np.linalg.inv(R)*(np.linalg.inv(K)*u - t)

        proj = self.getOpenGlCameraMatrix()
        invProj = np.linalg.inv(proj)
        x = invProj*np.row_stack([np.mat(u).T, [1]])
        x = x[:3] / x[3]
        return x
Project: barvikron | Author: catch22
def kronecker_weight_vpn(dims):
    """
    Return VectorPartitionFunction for computing weight multiplicities in
    the symmetric algebra Sym(C^prod(dims)) with respect to the maximal torus
    of GL(dims[1]) x ... x GL(dims[n]).
    """
    # build list of multi-indices
    multi_indices = list(itertools.product(*map(range, dims)))

    # build matrix such that the r-th row corresponds to the r-th entries of all weights
    As = []
    for i, dim in enumerate(dims):
        A = np.zeros(shape=(dim, len(multi_indices)), dtype=object)
        for j, midx in enumerate(multi_indices):
            A[midx[i], j] = 1
        As.append(A)
    A = np.row_stack(As)

    return VectorPartitionFunction(A)
Project: vae-flow | Author: andymiller
def callback(itr):
    def samplefun(num_samps):
        import numpy as np
        z = np.array(np.random.randn(num_samps, zdim), dtype=np.float32)
        return decode(z).eval(session=sess)
    viz.plot_samples(itr, samplefun, savedir='vae_mnist_samples')

    def sample_z(mu, log_sigmasq, M=5):
        eps = tf.random_normal((M, zdim), dtype=tf.float32)
        return mu + tf.exp(0.5 * log_sigmasq) * eps

    def recons(num_samps):
        # random subset
        subset = X[np.random.choice(X.shape[0], 1)]
        mu, log_sigmasq = encode(subset)
        imgs = decode(sample_z(mu, log_sigmasq, M=24)).eval(session=sess)
        return np.row_stack([subset, imgs])
    viz.plot_samples(itr, recons, savedir='vae_mnist_samples', stub='recon')
    test_lb = test_lb_fun.eval(session=sess) * Ntest
    print "test data VLB: ", np.mean(test_lb)

##########################################
# Make gradient descent fitting function #
##########################################
Project: Robo-Plot | Author: JackBuck
def _estimate_current_anticlockwise_degrees_using_minarearect(self, spot_xy) -> float:
        # Find the minimum area rectangle around the number
        nearby_contour_groups = contour_tools.extract_contour_groups_close_to(
            self.contour_groups, target_point_xy=spot_xy, delta=self._min_pixels_between_contour_groups)
        nearby_contours = [c for grp in nearby_contour_groups for c in grp]
        box = cv2.minAreaRect(np.row_stack(nearby_contours))
        corners_xy = cv2.boxPoints(box).astype(np.int32)
        self._log_contours_on_current_image([corners_xy], name="Minimum area rectangle")

        # Construct a vector which, once correctly rotated, goes from the bottom right corner up & left at 135 degrees
        sorted_corners = sorted(corners_xy, key=lambda pt: np.linalg.norm(spot_xy - pt))
        bottom_right_corner = sorted_corners[0]  # The closest corner to the spot
        adjacent_corners = sorted_corners[1:3]  # The next two closest corners

        unit_vectors_along_box_edge = misc.normalised(adjacent_corners - bottom_right_corner)
        up_left_diagonal = unit_vectors_along_box_edge.sum(axis=0)

        degrees_of_up_left_diagonal = np.rad2deg(np.arctan2(-up_left_diagonal[1], up_left_diagonal[0]))
        return degrees_of_up_left_diagonal - 135
Project: fracture-cuda | Author: inconvergent
def get_fractures(self):
    res = defaultdict(list)

    for fid, node in self.fid_node[:self.fnum, :]:
      res[fid].append(self.xy[node, :])

    return [row_stack(v) for k, v in res.items()]
Project: sand-glyphs | Author: inconvergent
def write(sand):
  from modules.writer import Writer

  lines = []
  vertices = []

  vnum = 0

  W = Writer(
      GLYPH_HEIGHT,
      GLYPH_WIDTH,
      WORD_SPACE,
      SHIFT_PROB,
      SHIFT_SIZE,
      EDGE
      )

  i = 0
  for y in linspace(EDGE, 1.0-EDGE, ROW_NUM):
    print(y)
    for a in W.export(
        get_word_generator(),
        y,
        gnum = GNUM,
        inum = INUM
        ):

      sand.paint_dots(a)
      i += 1

      vertices.append(a)
      lines.append(arange(len(a)).astype('int')+vnum)
      vnum += len(a)

  return row_stack(vertices), lines
Project: sand-glyphs | Author: inconvergent
def _interpolate_write_with_cursive(glyphs, inum, theta, noise, offset_size):
  stack = row_stack(glyphs)
  ig = _rnd_interpolate(stack, len(glyphs)*inum, ordered=True)
  gamma = theta + cumsum((1.0-2.0*random(len(ig)))*noise)
  dd = column_stack((cos(gamma), sin(gamma)))*offset_size
  a = ig + dd
  b = ig + dd[:,::-1]*array((1,-1))

  return a, b
Project: sand-glyphs | Author: inconvergent
def _export(self, glyphs, inum):
  stack = row_stack(glyphs)
  ig = _rnd_interpolate(stack, len(glyphs)*inum, ordered=True)
  return ig
Project: iutils | Author: inconvergent
def load(fn):

  from codecs import open
  from numpy import row_stack

  vertices = []
  faces = []
  lines = []

  with open(fn, 'r', encoding='utf8') as f:

    for l in f:
      if l.startswith('#'):
        continue

      values = l.split()
      if not values:
        continue
      if values[0] == 'v':
        vertices.append([float(v) for v in values[1:]])

      if values[0] == 'f':
        face = [int(v.split('//')[0])-1 for v in values[1:]]
        faces.append(face)

      if values[0] == 'l':
        line = [int(v.split('//')[0])-1 for v in values[1:]]
        lines.append(line)

  try:
    faces = row_stack(faces)
  except ValueError:
    faces = None

  return {
    'faces': faces,
    'vertices': row_stack(vertices),
    'lines': lines
  }
Project: iutils | Author: inconvergent
def spatial_concat_2d(paths, eps=1.e-9):

  from numpy.linalg import norm
  from numpy import row_stack

  res = []
  curr = paths[0]
  concats = 0
  for p in paths[1:]:
    if p.shape[0]<2:
      print('WARNING: path with only one vertex.')
      continue
    if norm(p[0,:]-curr[-1,:])<eps:
      curr = row_stack([curr, p[1:,:]])
      concats += 1
    else:
      res.append(curr)
      curr = p

  res.append(curr)

  print('concats: ', concats)
  print('original paths: ', len(paths))
  print('number after concatenation: ', len(res))

  print()

  return res
Project: svg-sorter | Author: inconvergent
def spatial_concat(paths, eps=1.e-9):

  from numpy.linalg import norm
  from numpy import row_stack

  res = []
  curr = paths[0]
  concats = 0
  for p in paths[1:]:
    if p.shape[0]<2:
      print('WARNING: path with only one vertex.')
      continue
    if norm(p[0,:]-curr[-1,:])<eps:
      curr = row_stack([curr, p[1:,:]])
      concats += 1
    else:
      res.append(curr)
      curr = p

  res.append(curr)

  print('concats: ', concats)
  print('original paths: ', len(paths))
  print('number after concatenation: ', len(res))

  print()

  return res
Project: marseille | Author: vene
def optimize_glove(glove_path, vocab):
    """Trim down GloVe embeddings to use only words in the data."""
    vocab_set = frozenset(vocab)
    seen_vocab = []
    X = []
    with open(glove_path) as f:
        for line in f:
            line = line.strip().split(' ')  # split() fails on ". . ."
            word, embed = line[0], line[1:]
            if word in vocab_set:
                X.append(np.array(embed, dtype=np.float32))
                seen_vocab.append(word)
    return seen_vocab, np.row_stack(X)
Project: PySAT | Author: USGS-Astrogeology
def dietrich_baseline(bands, intensities, half_window=16, num_erosions=10):
    '''
    Fast and precise automatic baseline correction of ... NMR spectra, 1991.
    http://www.sciencedirect.com/science/article/pii/002223649190402F
    http://www.inmr.net/articles/AutomaticBaseline.html
    '''
    # Step 1: moving-window smoothing
    w = half_window * 2 + 1
    window = np.ones(w) / float(w)
    Y = intensities.copy()
    if Y.ndim == 2:
        window = window[None]
    Y[..., half_window:-half_window] = convolve(Y, window, mode='valid')

    # Step 2: Derivative.
    dY = np.diff(Y) ** 2

    # Step 3: Iterative thresholding.
    is_baseline = np.ones(Y.shape, dtype=bool)
    is_baseline[..., 1:] = iterative_threshold(dY)

    # Step 4: Binary erosion, to get rid of peak-tops.
    mask = np.zeros_like(is_baseline)
    mask[..., half_window:-half_window] = True
    s = np.ones(3, dtype=bool)
    if Y.ndim == 2:
        s = s[None]
    is_baseline = binary_erosion(is_baseline, structure=s,
                                 iterations=num_erosions, mask=mask)

    # Step 5: Reconstruct baseline via interpolation.
    if Y.ndim == 2:
        return np.row_stack([np.interp(bands, bands[m], y[m])
                             for y, m in zip(intensities, is_baseline)])
    return np.interp(bands, bands[is_baseline], intensities[is_baseline])
Project: skan | Author: jni
def summarise(skelimage):
    ndim = skelimage.ndim
    g, counts, skelimage_labeled = skeleton_to_nx(skelimage)
    coords = np.nonzero(skelimage)
    ids = skelimage_labeled[coords]
    sorted_coords = np.transpose(coords)[np.argsort(ids)]
    tables = []
    for i, cc in enumerate(nx.connected_component_subgraphs(g)):
        stats = branch_statistics(cc)
        if stats.size == 0:
            continue
        coords0 = sorted_coords[stats[:, 0].astype(int) - 1]
        coords1 = sorted_coords[stats[:, 1].astype(int) - 1]
        distances = np.sqrt(np.sum((coords0 - coords1)**2, axis=1))
        skeleton_id = np.full(distances.shape, i, dtype=float)
        tables.append(np.column_stack((skeleton_id, stats,
                                       coords0, coords1, distances)))
    columns = (['skeleton-id', 'node-id-0', 'node-id-1', 'branch-distance',
                'branch-type'] +
               ['coord-0-%i' % i for i in range(ndim)] +
               ['coord-1-%i' % i for i in range(ndim)] +
               ['euclidean-distance'])
    column_types = [int, int, int, float, int] + 2*ndim*[int] + [float]
    arr = np.row_stack(tables).T
    data_dict = {col: dat.astype(dtype)
                 for col, dat, dtype in zip(columns, arr, column_types)}
    df = pd.DataFrame(data_dict)
    return df
Project: sentiment_analysis_textcnn | Author: norybaby
def __init__(self,file_path):
        # w2v_file = os.path.join(base_path, "vectors_poem.bin")
        self.model = word2vec.load(file_path)
        if 'unknown' not in self.model.vocab_hash:
            unknown_vec = np.random.uniform(-0.1,0.1,size=128)
            self.model.vocab_hash['unknown'] = len(self.model.vocab)
            self.model.vectors = np.row_stack((self.model.vectors,unknown_vec))
Project: chandra-acis-analysis | Author: liweitianux
def read_merged_qdp(infile):
    """
    Read merged QDP with multiple group of data separated by "no no no".
    """
    lines = map(lambda line: re.sub(r"^\s*no\s+no\s+no.*$", "X",
                                    line.strip(), flags=re.I),
                open(infile).readlines())
    lines = isplit(lines, ("X",))
    data_groups = []
    for block in lines:
        data = [list(map(float, l.split())) for l in block]
        data_groups.append(np.row_stack(data))
    return data_groups
Project: pygeotools | Author: dshean
def sliding_window_padded(a, ws, ss=(1,1), flatten=True):
    colpad = ws[0] // 2
    col_a = np.empty((a.shape[0],colpad))
    col_a[:] = np.nan
    a = np.column_stack([col_a, a, col_a])
    rowpad = ws[1] // 2
    row_a = np.empty((rowpad, a.shape[1]))
    row_a[:] = np.nan
    a = np.row_stack([row_a, a, row_a])
    return sliding_window(a, ws, ss, flatten) 

#From http://www.johnvinyard.com/blog/?p=268
Project: gdax-trader | Author: mcardillo55
def add_stick(self, stick_to_add):
        self.candlesticks = np.row_stack((self.candlesticks, stick_to_add.close_candlestick(self.name)))
Project: gdax-trader | Author: mcardillo55
def close_candlestick(self):
        if not self.updated_hist_data:
            self.time_of_first_candlestick_close = datetime.datetime.now()
        if len(self.candlesticks) > 0:
            self.candlesticks = np.row_stack((self.candlesticks,
                                              self.cur_candlestick.close_candlestick(period_name=self.name,
                                                                                     prev_stick=self.candlesticks[-1])))
        else:
            self.candlesticks = np.array([self.cur_candlestick.close_candlestick(self.name)])
Project: cgpm | Author: probcomp
def generate_quadrants(rows, rng):
    Q0 = rng.multivariate_normal([2,2], cov=[[.5,0],[0,.5]], size=rows//4)
    Q1 = rng.multivariate_normal([-2,2], cov=[[.5,0],[0,.5]], size=rows//4)
    Q2 = rng.multivariate_normal([-2,-2], cov=[[.5,0],[0,.5]], size=rows//4)
    Q3 = rng.multivariate_normal([2,-2], cov=[[.5,0],[0,.5]], size=rows//4)
    colors = iter(cm.gist_rainbow(np.linspace(0, 1, 4)))
    for q in [Q0, Q1, Q2, Q3]:
        plt.scatter(q[:,0], q[:,1], color=next(colors))
    plt.close('all')
    return np.row_stack((Q0, Q1, Q2, Q3))
Project: cgpm | Author: probcomp
def joint_parameters(self):
        mean = np.concatenate((np.zeros(self.L), self.mux))
        cov = np.row_stack((
            np.column_stack((np.eye(self.L), self.W.T)),
            np.column_stack((self.W, np.dot(self.W, self.W.T) + self.Psi))
        ))
        return mean, cov
Project: cgpm | Author: probcomp
def mvn_marginalize(mu, cov, query, evidence):
        Q, E = query, evidence
        # Retrieve means.
        muQ = mu[Q]
        muE = mu[E]
        # Retrieve covariances.
        covQ = cov[Q][:,Q]
        covE = cov[E][:,E]
        covJ = cov[Q][:,E]
        covQE = np.row_stack((
            np.column_stack((covQ, covJ)),
            np.column_stack((covJ.T, covE))
        ))
        assert np.allclose(covQE, covQE.T)
        return muQ, muE, covQ, covE, covJ
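
Both cgpm snippets assemble a symmetric block matrix by nesting column_stack calls inside row_stack. A minimal sketch of the same pattern (block names here are illustrative, not from cgpm); np.block expresses it more directly:

import numpy as np

A = np.eye(2)                       # top-left block
B = np.array([[1., 2.], [3., 4.]])  # off-diagonal block
C = np.full((2, 2), 5.)             # bottom-right block

M1 = np.row_stack((
    np.column_stack((A, B.T)),
    np.column_stack((B, C)),
))
M2 = np.block([[A, B.T], [B, C]])   # equivalent block-matrix construction
print(np.allclose(M1, M2))          # True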
Project: mlprojects-py | Author: srinathperera
def parse_parameter_sweep(file='/Users/srinath/playground/data-science/BimboInventoryDemand/logs/xgboost_params-explore-case4.txt'):
    file = open(file,'r')
    data =  file.read()

    data = data.replace('\n','')
    data = re.sub(r'\[=+\'\].*?s', '', data)
    #28. feature 27 =Producto_ID_Dev_proxima_StdDev (0.002047)

    p1 = re.compile('Run ([0-9+]) XGBoost_nocv {(.*?)} .*?rmsle=([0-9.]+)')

    readings = []
    for match in p1.finditer(data):
        data_index = int(match.group(1))
        params_as_str = match.group(2)
        rmsle = float(match.group(3))
        print(data_index, rmsle, params_as_str)

        kvmap = parse_map_from_str(params_as_str)
        print(kvmap)
        readings.append([data_index, rmsle, kvmap['eta'], kvmap['max_depth'], kvmap['min_child_weight'], kvmap['gamma'],
                         kvmap['subsample'], kvmap['colsample_bytree']])

    df_data = np.row_stack(readings)
    para_sweep_df= pd.DataFrame(df_data, columns=['data_index' , 'rmsle', 'eta', 'max_depth', 'min_child_weight', 'gamma',
                         'subsample', 'colsample_bytree'])
    print(para_sweep_df)
    return para_sweep_df
Project: mlprojects-py | Author: srinathperera
def vote_with_lr(conf, forecasts, best_model_index, y_actual):
    start = time.time()
    best_forecast = forecasts[:, best_model_index]
    forecasts = np.sort(np.delete(forecasts, best_model_index, axis=1), axis=1)
    forecasts = np.where(forecasts <=0, 0.1, forecasts)

    data_train = []

    for i in range(forecasts.shape[0]):
        f_row = forecasts[i,]
        min_diff_to_best = np.min([cal_rmsle(best_forecast[i], f) for f in f_row])
        comb = list(itertools.combinations(f_row,2))
        avg_error = scipy.stats.hmean([cal_rmsle(x,y) for (x,y) in comb])
        data_train.append([min_diff_to_best, avg_error, scipy.stats.hmean(f_row), np.median(f_row), np.std(f_row)])


    X_all = np.column_stack([np.row_stack(data_train), best_forecast])
    if conf.target_as_log:
        y_actual = transfrom_to_log(y_actual)
    # we use 25% of the data to train the ensemble and the rest for evaluation
    no_of_training_instances = int(round(len(y_actual)*0.25))
    X_train, X_test, y_train, y_test = train_test_split(no_of_training_instances, X_all, y_actual)
    y_actual_test = y_actual[no_of_training_instances:]

    lr_model = linear_model.Lasso(alpha=0.2)
    lr_model.fit(X_train, y_train)
    lr_forecast = lr_model.predict(X_test)
    lr_forcast_revered = retransfrom_from_log(lr_forecast)
    calculate_accuracy("vote__lr_forecast " + str(conf.command), y_actual_test, lr_forcast_revered)
    print_time_took(start, "vote_with_lr")
    return lr_forcast_revered
Project: mlprojects-py | Author: srinathperera
def parse_feature_explore_output(file_name, feature_importance_map):
    #[IDF1] ['clients_combined_vh_Mean_x', 'clients_combined_vhci_x', 'clients_combined_vh_median_x', 'Producto_ID_Venta_hoy_Mean', 'Producto_ID_Venta_hoyci', 'Producto_ID_Venta_hoy_median', 'Producto_ID_Dev_proxima_Mean', 'Producto_ID_Dev_proximaci', 'Producto_ID_Dev_proxima_median', 'agc_product_Mean', 'agc_productci', 'agc_product_median'] XGB 0.584072902792

    file = open(file_name,'r')
    data =  file.read()

    data = data.replace('\n','')
    data = re.sub(r'\[=+\'\].*?s', '', data)
    #28. feature 27 =Producto_ID_Dev_proxima_StdDev (0.002047)

    p1 = re.compile('\[IDF1\] (\[.*?\]) XGB ([0-9.]+)')

    readings = []
    for match in p1.finditer(data):
        feature_set = match.group(1)
        rmsle = float(match.group(2))
        if 0.56 < rmsle < 0.57:
            for f in parse_list_from_str(feature_set):
                count = feature_importance_map.get(f, 0)
                count += 1
                feature_importance_map[f] = count
        readings.append([feature_set, rmsle])

    df_data = np.row_stack(readings)
    para_sweep_df= pd.DataFrame(df_data, columns=['feature_set' , 'rmsle'])
    return para_sweep_df
Project: keras-face-attribute-manipulation | Author: wkcw
def combine_label_batch(num0, num1, numt=0, order='01'):
    assert order=='01' or order=='10'
    label_batch_0 = np.tile((1,0,0),(num0,1))
    label_batch_1 = np.tile((0,1,0),(num1,1))
    label_batch_t = np.tile((0,0,1),(numt,1))
    if order == '01':
        label_batch_all = np.row_stack((label_batch_0, label_batch_1, label_batch_t))
    else:
        label_batch_all = np.row_stack((label_batch_1, label_batch_0, label_batch_t))
    label_batch_all = label_batch_all.astype('float32')
    return label_batch_all
Project: SamuROI | Author: samuroi
def linescan(self, data, mask):
        """
        Calculate the trace for all children and return a 2D array of traces.
        :param data: the data to apply on.
        :param mask: some additional overlay mask
        :return: 2D numpy array holding traces for all children
        """
        return numpy.row_stack([child(data, mask) for child in self.segments])
Project: SamuROI | Author: samuroi
def outline(self):
        """
        Return the corners of the branch in such order that they encode a polygon.
        """
        return numpy.row_stack((self.corners[:, 0, :], self.corners[::-1, 1, :]))
Project: SamuROI | Author: samuroi
def quadrilaterals(self):
        """
        Generator over quadrilateral segments of that branch.
        """
        if self.nquadrilaterals > 0:
            corners = self.corners
            for i in range(self.nquadrilaterals):
                yield numpy.row_stack((corners[i, 0, :], corners[i + 1, 0, :], corners[i + 1, 1, :], corners[i, 1, :]))
Project: SamuROI | Author: samuroi
def linescan(self):
        """
        Calculate the trace for all children and return a 2D array aka linescan for that branch roi.
        """
        if self.parent_mask in self.__linescans:
            return self.__linescans[self.parent_mask]
        import numpy
        data = self.segmentation.data
        overlay = self.segmentation.overlay
        postprocessor = self.segmentation.postprocessor
        self.__linescans[self.parent_mask] = numpy.row_stack(
            [postprocessor(child(data, overlay)) for child in self.parent_mask.children])
        return self.__linescans[self.parent_mask]
Project: car-detection | Author: mmetcalfe
def draw(self, program, model=None, rawVertices=False):
        scale = np.eye(4, dtype=np.float32)
        scale[0,0] = self.scale[0]
        scale[1,1] = self.scale[1]
        scale[2,2] = self.scale[2]

        if not rawVertices:
            if model is None:
                orient = lookAtTransform(self.pos, self.pos + self.dir, self.up, square=True)
                # model = np.linalg.inv(orient)*scale
                model = np.linalg.inv(orient)*scale
            else:
                orient = lookAtTransform(self.pos, self.pos + self.dir, self.up, square=True)
                # model = model*np.linalg.inv(orient)*scale
                model = model*np.linalg.inv(orient)*scale
        else:
            model = np.eye(4, dtype=np.float32)
        program.setUniformMat4('model', model)

        # for mesh in self.aiModel.meshes:
        #     for i in range(0, len(mesh.vertices)):
        #         # print 'model', model
        #         vert = np.row_stack([np.matrix(mesh.vertices[i]).T, np.array([1])])
        #         worldVert = model*vert
        #         eyeVert = proj*worldVert
        #         ndcVert = eyeVert[:3]/eyeVert[3]
        #         print 'worldVert:', worldVert.T
        #         # print '   m->w', worldVert.T
        #         print '   w->e', eyeVert.T
        #         print '   e->n', ndcVert.T

        glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)

        for mesh in self.meshBuffers:
            mesh.draw(program)

        glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
Project: car-detection | Author: mmetcalfe
def getOpenGlCameraMatrix(self):
        K, R, t = self.factor()
        # print 'R', R

        # print 'getOpenGlCameraMatrix:'
        # print 'Kraw:', K

        K = convertToOpenGLCameraMatrix(K, self.framebufferSize, self.near, self.far)

        # print 'K:', K

        V = np.column_stack((R, t))
        V = np.row_stack((
            V,
            np.array([0,0,0,1], np.float32)
        ))

        # print 'V:', V

        P = K*V

        # print 'P:', P

        # vpMat = viewPortMatrix(framebufferSize)
        # print 'vpMat:', vpMat
        # print 'VpP:', vpMat*P

        return P
Project: gail-driver | Author: sisl
def _predict(self, t, X):
        sess = tf.get_default_session()

        N, _ = X.shape
        B = self.input_var.get_shape()[0].value

        if B is None or B == N:
            pred = sess.run(t, {self.input_var: X})
        else:
            pred = [sess.run(t, {self.input_var: X[i:i + B]})
                    for i in range(0, N, B)]
            pred = np.row_stack(pred)

        return pred
Project: HIT_ML_2017 | Author: Red-Night-Aria
def calc_L(matrix):
    (x, y) = np.shape(matrix)
    return np.row_stack((np.mat(np.ones((1, y))), Sigmoid(matrix)))
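
calc_L above uses row_stack to prepend a row of ones (a bias term) to the sigmoid-activated matrix. A standalone sketch of that pattern, with the sigmoid written out since Sigmoid is defined elsewhere in the project:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

activations = sigmoid(np.random.randn(3, 5))              # 3 units x 5 samples
with_bias = np.row_stack((np.ones((1, 5)), activations))  # bias row on top
print(with_bias.shape)                                    # (4, 5)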
Project: crankshaft | Author: CartoDB
def predict_segment(model, features, target_query):
    """
    Use the provided model to predict the values for the new feature set
        Input:
            @param model: The pretrained model
            @features: A list of features to use in the model prediction (list of column names)
            @target_query: The query to run to obtain the data to predict on and the cartodb_ids associated with it.
    """

    batch_size = 1000
    joined_features = ','.join(['"{0}"::numeric'.format(a) for a in features])

    try:
        cursor = plpy.cursor('SELECT Array[{joined_features}] As features FROM ({target_query}) As a'.format(
            joined_features=joined_features,
            target_query=target_query))
    except Exception as e:
        plpy.error('Failed to build segmentation model: %s' % e)

    results = []

    while True:
        rows = cursor.fetch(batch_size)
        if not rows:
            break
        batch = np.row_stack([np.array(row['features'], dtype=float) for row in rows])

        #Need to fix this. Should be global mean. This will cause weird effects
        batch = replace_nan_with_mean(batch)
        prediction = model.predict(batch)
        results.append(prediction)

    try:
        cartodb_ids = plpy.execute('''SELECT array_agg(cartodb_id ORDER BY cartodb_id) As cartodb_ids FROM ({0}) As a'''.format(target_query))[0]['cartodb_ids']
    except Exception as e:
        plpy.error('Failed to build segmentation model: %s' % e)

    return cartodb_ids, np.concatenate(results)