Python numpy module: tile() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.tile().
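As a quick primer before the examples: np.tile(A, reps) builds an array by repeating A along each axis the number of times given in reps (this is standard NumPy behavior):

import numpy as np

a = np.array([1, 2, 3])
np.tile(a, 2)       # array([1, 2, 3, 1, 2, 3]) -- repeat along the last axis
np.tile(a, (2, 1))  # array([[1, 2, 3],
                    #        [1, 2, 3]]) -- stack two copies as rows
np.tile(a, (2, 2))  # array([[1, 2, 3, 1, 2, 3],
                    #        [1, 2, 3, 1, 2, 3]])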

Project: pybot    Author: spillai    | project source | file source
def draw_bboxes(vis, bboxes, texts=None, ellipse=False, colored=True):
    if not len(bboxes): 
        return vis

    if not colored: 
        cols = np.tile([240,240,240], [len(bboxes), 1])
    else: 
        N = 20
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[idx % N] for idx, _ in enumerate(bboxes)])            

    texts = [None] * len(bboxes) if texts is None else texts
    for col, b, t in zip(cols, bboxes, texts): 
        if ellipse: 
            cv2.ellipse(vis, ((b[0]+b[2])/2, (b[1]+b[3])/2), ((b[2]-b[0])/2, (b[3]-b[1])/2), 0, 0, 360, 
                        color=tuple(col), thickness=1)
        else: 
            cv2.rectangle(vis, (b[0], b[1]), (b[2], b[3]), tuple(col), 2)
        if t: 
            annotate_bbox(vis, b, title=t)
    return vis
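The np.tile([240,240,240], [len(bboxes), 1]) call above builds one gray RGB row per box. A minimal standalone sketch of the same pattern (the boxes here are made up for illustration):

import numpy as np

bboxes = [(0, 0, 10, 10), (5, 5, 20, 20), (8, 2, 30, 12)]  # hypothetical boxes
cols = np.tile([240, 240, 240], [len(bboxes), 1])
print(cols.shape)  # (3, 3): one [240, 240, 240] row per bounding box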
Project: pybot    Author: spillai    | project source | file source
def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
        """
        color_type: {age, unique}
        """

        N = 20
        # inds = self.confident_tracks(min_length=min_track_length)
        # if not len(inds): 
        #     return

        # ids, pts = self.latest_ids[inds], self.latest_pts[inds]
        # lengths = self.tm_.lengths[inds]

        ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

        if color_type == 'unique': 
            cwheel = colormap(np.linspace(0, 1, N))
            cols = np.vstack([cwheel[tid % N] for idx, tid in enumerate(ids)])
        elif color_type == 'age': 
            cols = colormap(lengths)
        else: 
            raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

        if not colored: 
            cols = np.tile([0,240,0], [len(self.tm_.tracks), 1])

        for col, pts in izip(cols.astype(np.int64), self.tm_.tracks.itervalues()): 
            cv2.polylines(out, [np.vstack(pts.items).astype(np.int32)[-max_track_length:]], False, 
                          tuple(col), thickness=1)
            tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
            cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
Project: probabilistic-matrix-factorization    Author: aki-nishimura    | project source | file source
def __init__(self, y_coo, num_factor, bias_scale, factor_scale, weight=None):

        if weight is None:
            weight = np.ones(y_coo.data.size)

        self.y_coo = y_coo
        self.y_csr = scipy.sparse.csr_matrix(y_coo)
        self.y_csc = scipy.sparse.csc_matrix(y_coo)
        self.num_factor = num_factor
        self.prior_param = {
            'col_bias_scale': bias_scale,
            'row_bias_scale': bias_scale,
            'factor_scale': np.tile(factor_scale, self.num_factor),
            'weight': weight,
            'obs_df': float('inf'),
            'param_df': float('inf'),
        }
Project: pycpd    Author: siavashk    | project source | file source
def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
      diff     = self.X - np.tile(self.TY[i, :], (self.N, 1))
      diff    = np.multiply(diff, diff)
      P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den==0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1)
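In EStep above, np.tile(den, (self.M, 1)) replicates the per-column normalizer to the shape of P before dividing; plain broadcasting gives the same result without the intermediate copy. A small sketch with stand-in sizes:

import numpy as np

M, N = 4, 6
P = np.random.rand(M, N)          # stand-in for the M x N responsibilities
den = np.sum(P, axis=0)           # shape (N,)
tiled = P / np.tile(den, (M, 1))  # explicit replication, as in EStep
bcast = P / den                   # broadcasting, no copy
assert np.allclose(tiled, bcast)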
Project: HandDetection    Author: YunqiuXu    | project source | file source
def im_detect(sess, net, im):
  blobs, im_scales = _get_blobs(im)
  assert len(im_scales) == 1, "Only single-image batch implemented"

  im_blob = blobs['data']
  blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)

  _, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])

  boxes = rois[:, 1:5] / im_scales[0]
  scores = np.reshape(scores, [scores.shape[0], -1])
  bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
  if cfg.TEST.BBOX_REG:
    # Apply bounding-box regression deltas
    box_deltas = bbox_pred
    pred_boxes = bbox_transform_inv(boxes, box_deltas)
    pred_boxes = _clip_boxes(pred_boxes, im.shape)
  else:
    # Simply repeat the boxes, once for each class
    pred_boxes = np.tile(boxes, (1, scores.shape[1]))

  return scores, pred_boxes
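In the no-regression branch, np.tile(boxes, (1, scores.shape[1])) repeats each 4-number box horizontally, once per class, so pred_boxes lines up column-wise with scores. A toy illustration:

import numpy as np

boxes = np.array([[0., 0., 10., 10.]])  # one ROI as (x1, y1, x2, y2)
num_classes = 3
pred_boxes = np.tile(boxes, (1, num_classes))
print(pred_boxes.shape)  # (1, 12): the same box repeated once per class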
Project: pybot    Author: spillai    | project source | file source
def get_color_arr(c, n, flip_rb=False):
    """ 
    Convert string c to carr array (N x 3) format
    """
    carr = None;

    if isinstance(c, str): # single color
        carr = np.tile(np.array(colorConverter.to_rgb(c)), [n,1])
    elif  isinstance(c, float):
        carr = np.tile(np.array(color_func(c)), [n,1])
    else:
        carr = reshape_arr(c)

    if flip_rb: 
        b, r = carr[:,0], carr[:,2]
        carr[:,0], carr[:,2] = r.copy(), b.copy()

    # return floating point with values in [0,1]
    return carr.astype(np.float32) / 255.0 if carr.dtype == np.uint8 else carr.astype(np.float32)
Project: shift-detect    Author: paolodedios    | project source | file source
def getMedianDistanceBetweenSamples(self, sampleSet=None) :
        """
        Jaakkola's heuristic method for setting the width parameter of the Gaussian
        radial basis function kernel is to pick a quantile (usually the median) of
        the distribution of Euclidean distances between points having different
        labels.

        Reference:
        T. Jaakkola, M. Diekhans, and D. Haussler. Using the Fisher kernel method to detect
        remote protein homologies. In T. Lengauer, R. Schneider, P. Bork, D. Brutlag, J.
        Glasgow, H.-W. Mewes, and R. Zimmer, editors, Proceedings of the Seventh
        International Conference on Intelligent Systems for Molecular Biology.
        """
        numrows = sampleSet.shape[0]
        samples = sampleSet

        G = sum((samples * samples), 1)
        Q = numpy.tile(G[:, None], (1, numrows))
        R = numpy.tile(G, (numrows, 1))

        distances = Q + R - 2 * numpy.dot(samples, samples.T)
        distances = distances - numpy.tril(distances)
        distances = distances.reshape(numrows**2, 1, order="F").copy()

        return numpy.sqrt(0.5 * numpy.median(distances[distances > 0]))
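The Q + R construction above is the tile-based expansion of the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b. A compact check against a direct pairwise computation (sizes are arbitrary):

import numpy as np

samples = np.random.rand(5, 3)
G = np.sum(samples * samples, axis=1)
Q = np.tile(G[:, None], (1, 5))
R = np.tile(G, (5, 1))
D2 = Q + R - 2 * np.dot(samples, samples.T)

direct = ((samples[:, None, :] - samples[None, :, :]) ** 2).sum(-1)
assert np.allclose(D2, direct)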
Project: Renewables_Scenario_Gen_GAN    Author: chennnnnyize    | project source | file source
def load_solar_data():
    with open('solar label.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        rows = [row for row in reader]
    labels = np.array(rows, dtype=int)
    print(shape(labels))

    with open('solar.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        rows = [row for row in reader]
    rows = np.array(rows, dtype=float)
    rows=rows[:104832,:]
    print(shape(rows))
    trX = np.reshape(rows.T,(-1,576))
    print(shape(trX))
    m = np.ndarray.max(rows)
    print("maximum value of solar power", m)
    trY=np.tile(labels,(32,1))
    trX=trX/m
    return trX,trY
Project: lung-cancer-detector    Author: YichenGong    | project source | file source
def to_rgb(img):
    """
    Converts the given array into an RGB image. If the number of channels is not
    3, the array is tiled so that it has 3 channels. Finally, the values are
    rescaled to [0, 255]

    :param img: the array to convert [nx, ny, channels]

    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)

    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img
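The key line is np.tile(img, 3): for an (nx, ny, 1) array the reps argument is padded to (1, 1, 3), so only the channel axis is repeated. In miniature:

import numpy as np

gray = np.random.rand(4, 4)            # single-channel image
rgb = np.tile(np.atleast_3d(gray), 3)  # reps 3 -> (1, 1, 3) for a 3-D array
print(rgb.shape)                       # (4, 4, 3)
assert np.allclose(rgb[..., 0], rgb[..., 2])  # all three channels identical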
Project: xarray-simlab    Author: benbovy    | project source | file source
def test_run_model(self, input_dataset, ds_model_interface):
        out_ds = ds_model_interface.run_model()

        expected = input_dataset.copy()
        del expected.attrs[SimlabAccessor._snapshot_vars_key]
        del expected.clock.attrs[SimlabAccessor._snapshot_vars_key]
        del expected.out.attrs[SimlabAccessor._snapshot_vars_key]
        expected['grid__x'] = ('x', np.arange(10), {'description': ''})
        expected['quantity__quantity'] = (
            ('clock', 'x'),
            np.arange(0, 10, 2)[:, None] * np.arange(10) * 1.,
            {'description': 'a quantity'}
        )
        expected['some_process__some_effect'] = (
            ('out', 'x'), np.tile(np.arange(2, 12), 3).reshape(3, 10),
            {'description': ''}
        )
        expected['other_process__other_effect'] = (
            ('out', 'x'), np.tile(np.arange(-2, 8), 3).reshape(3, 10),
            {'description': ''}
        )

        xr.testing.assert_identical(out_ds, expected)
Project: DeepWorks    Author: daigo0927    | project source | file source
def get_image(filepath, image_target, image_size):

    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target)/2)    
    w_drop = int((w_origin - image_target)/2)

    if img.ndim == 2:
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1,1,3))

    img_crop = img[h_drop:h_drop+image_target, w_drop:w_drop+image_target, :]

    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize)/127.5 - 1.
Project: CausalGAN    Author: mkocaoglu    | project source | file source
def fixed_label_diversity(model, config,step=''):
    sample_dir=make_sample_dir(model)
    str_step=str(step) or guess_model_step(model)

    N = 64  # samples per image
    n_combo = 5  # number of label combinations

    #0,1 label combinations
    fixed_labels=model.attr.sample(n_combo)[model.cc.node_names]
    size=infer_grid_image_shape(N)

    for j, fx_label in enumerate(fixed_labels.values):
        fx_label=np.reshape(fx_label,[1,-1])
        fx_label=np.tile(fx_label,[N,1])
        do_dict={model.cc.labels: fx_label}

        images, feed_dict= sample(model, do_dict=do_dict)
        fx_file=os.path.join(sample_dir, str_step+'fxlab'+str(j)+'.pdf')
        save_figure_images(model.model_type,images['G'],fx_file,size=size)

    #which image is what label
    fixed_labels=fixed_labels.reset_index(drop=True)
    fixed_labels.to_csv(os.path.join(sample_dir,str_step+'fxlab'+'.csv'))
Project: pynufft    Author: jyhmiinlin    | project source | file source
def _linear_phase(self, n_shift):
        """
        Private: Select the center of FOV
        """
        om = self.st['om']
        M = self.st['M']
        final_shifts = tuple(
            numpy.array(n_shift) +
            numpy.array(self.st['Nd']) / 2)

        phase = numpy.exp(
            1.0j *
            numpy.sum(
                om * numpy.tile(
                    final_shifts,
                    (M,1)),
                1))
        # add up all the linear phases along all axes

        self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
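numpy.tile(final_shifts, (M, 1)) expands the length-d shift vector into an (M, d) array so it can be multiplied elementwise with the (M, d) k-space coordinates om. In sketch form (all values are stand-ins):

import numpy as np

M, d = 6, 2
om = np.random.rand(M, d)    # k-space sample coordinates
final_shifts = (16.0, 16.0)  # per-axis FOV shift
phase = np.exp(1.0j * np.sum(om * np.tile(final_shifts, (M, 1)), 1))
print(phase.shape)           # (M,): one linear-phase factor per sample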
Project: pynufft    Author: jyhmiinlin    | project source | file source
def linear_phase(self, n_shift):
        '''
        Select the center of FOV
        '''
        om = self.st['om']
        M = self.st['M']
        final_shifts = tuple(
            numpy.array(n_shift) +
            numpy.array(self.st['Nd']) / 2)

        phase = numpy.exp(
            1.0j *
            numpy.sum(
                om * numpy.tile(
                    final_shifts,
                    (M,1)),
                1))
        # add up all the linear phases along all axes

        self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
        # multiply the diagonal linear-phase matrix into the gridding matrix
Project: pynufft    Author: jyhmiinlin    | project source | file source
def _linear_phase(self, n_shift):
        """
        Private: Select the center of FOV
        """
        om = self.st['om']
        M = self.st['M']
        final_shifts = tuple(
            numpy.array(n_shift) +
            numpy.array(self.Nd) / 2)

        phase = numpy.exp(
            1.0j *
            numpy.sum(
                om * numpy.tile(
                    final_shifts,
                    (M,1)),
                1))
        # add up all the linear phases along all axes

        self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
        return 0  # shifted sparse matrix
Project: pynufft    Author: jyhmiinlin    | project source | file source
def _linear_phase(self, n_shift):
        """
        Private: Select the center of FOV
        """
        om = self.st['om']
        M = self.st['M']
        final_shifts = tuple(
            numpy.array(n_shift) +
            numpy.array(self.st['Nd']) / 2)

        phase = numpy.exp(
            1.0j *
            numpy.sum(
                om * numpy.tile(
                    final_shifts,
                    (M,1)),
                1))
        # add up all the linear phases along all axes

        self.st['p'] = scipy.sparse.diags(phase, 0).dot(self.st['p0'])
Project: rain-metrics-python    Author: apendergrass    | project source | file source
def makedists(pdata,binl):
    ##### This is called from within makeraindist.
    ##### Calculate distributions
    pds=pdata.shape;    nlat=pds[1];    nlon=pds[0];    nd=pds[2]
    bins=np.append(0,binl)
    n=np.empty((nlon,nlat,len(binl)))
    binno=np.empty(pdata.shape)
    for ilon in range(nlon):
        for ilat in range(nlat):
            # this is the histogram - we'll get frequency from this
            thisn,thisbin=np.histogram(pdata[ilon,ilat,:],bins) 
            n[ilon,ilat,:]=thisn
            # these are the bin locations. we'll use these for the amount dist
            binno[ilon,ilat,:]=np.digitize(pdata[ilon,ilat,:],bins) 
    #### Calculate the number of days with non-missing data, for normalization
    ndmat=np.tile(np.expand_dims(np.nansum(n,axis=2),axis=2),(1,1,len(bins)-1))
    thisppdfmap=n/ndmat
    #### Iterate back over the bins and add up all the precip - this will be the rain amount distribution
    testpamtmap=np.empty(thisppdfmap.shape)
    for ibin in range(len(bins)-1):
        testpamtmap[:,:,ibin]=(pdata*(ibin==binno)).sum(axis=2)
    thispamtmap=testpamtmap/ndmat
    return thisppdfmap,thispamtmap
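The ndmat line tiles the per-cell day count across the bin axis so that n / ndmat divides every bin of a grid cell by the same total. The same normalization in miniature (shapes are stand-ins):

import numpy as np

n = np.random.randint(1, 10, size=(2, 3, 5)).astype(float)  # (nlon, nlat, nbins)
total = np.nansum(n, axis=2)                                # days per grid cell
ndmat = np.tile(np.expand_dims(total, axis=2), (1, 1, 5))
pdf = n / ndmat  # each grid cell's bins now sum to 1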
Project: rain-metrics-python    Author: apendergrass    | project source | file source
def makedists(pdata,binl):
    ##### This is called from within makeraindist.
    ##### Calculate distributions
    pds=pdata.shape;    nlat=pds[1];    nlon=pds[0];    nd=pds[2]
    bins=np.append(0,binl)
    n=np.empty((nlon,nlat,len(binl)))
    binno=np.empty(pdata.shape)
    for ilon in range(nlon):
        for ilat in range(nlat):
            # this is the histogram - we'll get frequency from this
            thisn,thisbin=np.histogram(pdata[ilon,ilat,:],bins) 
            n[ilon,ilat,:]=thisn
            # these are the bin locations. we'll use these for the amount dist
            binno[ilon,ilat,:]=np.digitize(pdata[ilon,ilat,:],bins) 
    #### Calculate the number of days with non-missing data, for normalization
    ndmat=np.tile(np.expand_dims(np.nansum(n,axis=2),axis=2),(1,1,len(bins)-1))
    thisppdfmap=n/ndmat
    #### Iterate back over the bins and add up all the precip - this will be the rain amount distribution
    testpamtmap=np.empty(thisppdfmap.shape)
    for ibin in range(len(bins)-1):
        testpamtmap[:,:,ibin]=(pdata*(ibin==binno)).sum(axis=2)
    thispamtmap=testpamtmap/ndmat
    return thisppdfmap,thispamtmap
Project: autolab_core    Author: BerkeleyAutomation    | project source | file source
def test_point_cloud_transformation(self, num_points=10):
        R_a_b = RigidTransform.random_rotation()
        t_a_b = RigidTransform.random_translation()
        T_a_b = RigidTransform(R_a_b, t_a_b, 'a', 'b')

        x_a = np.random.rand(3, num_points)
        pc_a = PointCloud(x_a, 'a')

        # multiply with numpy arrays
        x_b = R_a_b.dot(x_a) + np.tile(t_a_b.reshape(3,1), [1, num_points])

        # use multiplication operator
        pc_b = T_a_b * pc_a

        self.assertTrue(np.sum(np.abs(pc_b.data - x_b)) < 1e-5, msg='Point cloud transformation incorrect:\n Expected:\n {}\n Got:\n {}'.format(x_b, pc_b.data)) 

        # check frames
        self.assertEqual(pc_b.frame, 'b', msg='Transformed point cloud has incorrect frame')
Project: PersonalizedMultitaskLearning    Author: mitmedialab    | project source | file source
def initializeAllParameters(self):
        self.phi = (1.0 / self.K) * np.ones((self.n_tasks,self.K))
        self.theta = np.tile(self.mu, (self.K, 1)) 
        self.gamma = [self.sigma for i in range(self.K)]
        self.xi = [[0]* len(self.task_dict[i]['Y']) for i in range(self.n_tasks)]
        self.computeXi()
        self.tau1 = self.tau10
        self.tau2 = self.tau20
        self.computeSmallPhis() 
        self.computeTaus()
        self.s = np.zeros((self.n_tasks,self.K))
        self.computeTaskVectors()

        self.xi_convergence_list = []
        self.phi_convergence_list = []
        self.s_convergence_list = []
        self.gamma_convergence_list = []
        self.theta_convergence_list = []

        if self.debug:
            print "initial phi", self.phi
            print "initial small phi1", self.small_phi1
            print "initial small phi2", self.small_phi2
            print "initial tau1", self.tau1, "tau2", self.tau2
Project: mx-rfcn    Author: giorking    | project source | file source
def do_checkpoint(prefix):
    """Callback to checkpoint the model to prefix every epoch.

    Parameters
    ----------
    prefix : str
        The file prefix to checkpoint to

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    def _callback(iter_no, sym, arg, aux):
        #if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
        #    print "save model with mean/std"
        #    num_classes = len(arg['bbox_pred_bias'].asnumpy()) / 4
        #    means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (1, num_classes))
        #    stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (1, num_classes))
        #    arg['bbox_pred_weight'] = (arg['bbox_pred_weight'].T * mx.nd.array(stds)).T
        #    arg['bbox_pred_bias'] = arg['bbox_pred_bias'] * mx.nd.array(np.squeeze(stds)) + \
        #                                   mx.nd.array(np.squeeze(means))
        """The checkpoint function."""
        save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
    return _callback
Project: WGAN_GP    Author: daigo0927    | project source | file source
def get_image(filepath, image_target, image_size):

    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target)/2)    
    w_drop = int((w_origin - image_target)/2)

    if img.ndim == 2:
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1,1,3))

    img_crop = img[h_drop:h_drop+image_target, w_drop:w_drop+image_target, :]

    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize)/127.5 - 1.
Project: DL4MT    Author: thompsonb    | project source | file source
def test_f_init_dims(self):
        """
        Best I can tell, f_init is only ever given one sentence, but it appears to be
        written to process multiple sentences.
        """
        self.logger.info("========================================================================================")
        self.logger.info("Starting the f_init_dims test to determine that x_f_init acts as expected.")
        self.logger.info("========================================================================================")
        x0_state0, x0_ctx0 = self.remote_interface.x_f_init(x0)  # (1, 1024) (31, 1, 2048)

        # If tile input, state/context should be tiled too
        xx0_state0, xx0_ctx0 = self.remote_interface.x_f_init(xx0)  # (2, 1024) (31, 2, 2048)
        self.assertTrue(np.allclose(np.tile(x0_state0, [2, 1]), xx0_state0))
        self.assertTrue(np.allclose(np.tile(x0_ctx0, [1, 2, 1]), xx0_ctx0))

        # Different inputs should create different state
        x1_state0, x1_ctx0 = self.remote_interface.x_f_init(x1)
        self.assertFalse(np.allclose(x0_state0, x1_state0))

        # Different inputs (of same length) should create different state and context
        x1_2_state0, x1_2_ctx0 = self.remote_interface.x_f_init(x1 * 2)
        self.assertFalse(np.allclose(x1_state0, x1_2_state0))
        self.assertFalse(np.allclose(x1_ctx0, x1_2_ctx0))
Project: lps-anchor-pos-estimator    Author: bitcraze    | project source | file source
def toa_normalize(x0, y0):
    xdim = x0.shape[0]
    m = x0.shape[1]
    n = y0.shape[1]

    # shift so the first anchor sits at the origin
    # (column indices read as 0-based; the source was ported from 1-based code)
    t = -x0[:, 0]
    x = x0 + np.tile(t[:, np.newaxis], (1, m))
    y = y0 + np.tile(t[:, np.newaxis], (1, n))

    qr_a = x[:, 1:(1 + xdim)]
    q, r = scipy.linalg.qr(qr_a)

    x = np.dot(q.conj().T, x)
    y = np.dot(q.conj().T, y)
    M = np.diag(np.sign(np.diag(qr_a)))
    x1 = np.dot(M, x)
    y1 = np.dot(M, y)

    return x1, y1
Project: probabilistic-matrix-factorization    Author: aki-nishimura    | project source | file source
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i, phi_r_i, phi_u):
        # Params:
        #   J - column indices

        nnz_i = len(J)
        residual_i = y_i - mu0 - c[J]
        prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
        v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
        post_Phi_i = prior_Phi + \
                     np.dot(v_T.T,
                            np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T)  # Weighted sum of v_j * v_j.T
        post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_i)
        post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_i)),
                                                                                   lower=lower)
        ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev_i], u_prev_i)))
        r_i = ru_i[0]
        u_i = ru_i[1:]

        return r_i, u_i
Project: probabilistic-matrix-factorization    Author: aki-nishimura    | project source | file source
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j, phi_c_j, phi_v):

        prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
        nnz_j = len(I)
        residual_j = y_j - mu0 - r[I]
        u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
        post_Phi_j = prior_Phi + \
                     np.dot(u_T.T,
                            np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T)  # Weighted sum of u_i * u_i.T
        post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_j)
        post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        cv_j = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_j)),
                                                                              lower=lower)
        cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev_j], v_prev_j)))
        c_j = cv_j[0]
        v_j = cv_j[1:]

        return c_j, v_j
Project: Poccala    Author: Byshx    | project source | file source
def frame_blocking(signal, framerate, sampletime=0.025, overlap=0.5):
            """
            ??

                ?N???????????????????????N???256?512???????
            ?20~30ms??
            :param framerate:???
            :param signal:????
            :param sampletime:??????
            :param overlap:????????????
            :return: ????????
            """
            samplenum = len(signal)  # ??????
            framesize = int(framerate * sampletime)  # ?????
            step = int(framesize * overlap)  # ?????
            framenum = 1 + math.ceil((samplenum - framesize) / step)  # ??????
            padnum = (framenum - 1) * step + framesize
            zeros = np.zeros((padnum - samplenum,))
            padsignal = np.concatenate((signal, zeros))

            indices = np.tile(np.arange(0, framesize), (framenum, 1)) + np.tile(np.arange(0, framenum * step, step),
                                                                                (framesize, 1)).T
            indices = np.array(indices, dtype=np.int32)
            frames = padsignal[indices]
            return frames
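The index matrix is the sum of a tiled within-frame ramp and a tiled, then transposed, frame-offset ramp. With tiny numbers the construction is easy to see:

import numpy as np

framesize, step, framenum = 4, 2, 3
indices = np.tile(np.arange(0, framesize), (framenum, 1)) + \
          np.tile(np.arange(0, framenum * step, step), (framesize, 1)).T
print(indices)
# [[0 1 2 3]
#  [2 3 4 5]
#  [4 5 6 7]]  -- row k holds the sample indices of frame k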
Project: plotnine    Author: has2k1    | project source | file source
def add_missing_facets(data, layout, vars, facet_vals):
    # When in a dataframe some layer does not have all
    # the facet variables, add the missing facet variables
    # and create new data where the points(duplicates) are
    # present in all the facets
    missing_facets = set(vars) - set(facet_vals)
    if missing_facets:
        to_add = layout.loc[:, missing_facets].drop_duplicates()
        to_add.reset_index(drop=True, inplace=True)

        # a point for each facet, [0, 1, ..., n-1, 0, 1, ..., n-1, ...]
        data_rep = np.tile(np.arange(len(data)), len(to_add))
        # a facet for each point, [0, 0, 0, 1, 1, 1, ... n-1, n-1, n-1]
        facet_rep = np.repeat(np.arange(len(to_add)), len(data))

        data = data.iloc[data_rep, :].reset_index(drop=True)
        facet_vals = facet_vals.iloc[data_rep, :].reset_index(drop=True)
        to_add = to_add.iloc[facet_rep, :].reset_index(drop=True)
        facet_vals = pd.concat([facet_vals, to_add],
                               axis=1, ignore_index=False)

    return data, facet_vals
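The snippet pairs np.tile with np.repeat to form a cross product of rows: tile cycles the data indices, while repeat blocks the facet indices. Side by side:

import numpy as np

n_data, n_facets = 3, 2
print(np.tile(np.arange(n_data), n_facets))    # [0 1 2 0 1 2]
print(np.repeat(np.arange(n_facets), n_data))  # [0 0 0 1 1 1]
# pairing the two enumerates every (data row, facet) combination exactly once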
Project: promplib    Author: baxter-flowers    | project source | file source
def __init__(self, nrBasis=11, sigma=0.05, num_samples=100):
        self.x = np.linspace(0, 1, num_samples)
        self.nrSamples = len(self.x)
        self.nrBasis = nrBasis
        self.sigma = sigma
        self.sigmaSignal = float('inf')  # Noise of signal (float)
        self.C = np.arange(0,nrBasis)/(nrBasis-1.0)
        self.Phi = np.exp(-.5 * (np.array(map(lambda x: x - self.C, np.tile(self.x, (self.nrBasis, 1)).T)).T ** 2 / (self.sigma ** 2)))
        self.Phi /= sum(self.Phi)

        self.viapoints = []
        self.W = np.array([])
        self.nrTraj = 0
        self.meanW = None
        self.sigmaW = None
        self.Y = np.empty((0, self.nrSamples), float)
Project: promplib    Author: baxter-flowers    | project source | file source
def generate_trajectory(self, randomness=1e-10):
        """
        Outputs a trajectory
        :param randomness: float between 0. (output will be the mean of gaussians) and 1. (fully randomized inside the variance)
        :return: a 1-D vector of the generated points
        """
        newMu = self.meanW
        newSigma = self.sigmaW

        for viapoint in self.viapoints:
            PhiT = np.exp(-.5 * (np.array(map(lambda x: x - self.C, np.tile(viapoint['t'], (11, 1)).T)).T ** 2 / (self.sigma ** 2)))
            PhiT = PhiT / sum(PhiT)  # basis functions at observed time points

            # Conditioning
            aux = viapoint['sigmay'] + np.dot(np.dot(PhiT.T, newSigma), PhiT)
            newMu = newMu + np.dot(np.dot(newSigma, PhiT) * 1 / aux, (viapoint['obsy'] - np.dot(PhiT.T, newMu)))  # new weight mean conditioned on observations
            newSigma = newSigma - np.dot(np.dot(newSigma, PhiT) * 1 / aux, np.dot(PhiT.T, newSigma))

        sampW = np.random.multivariate_normal(newMu, randomness*newSigma, 1).T
        return np.dot(self.Phi.T, sampW)
Project: DNC    Author: bgavran    | project source | file source
def test_link_matrix(self):
        b, n = 2, 5
        write_weighting = np.random.rand(b, n)
        precedence_weighting = np.random.rand(b, n)  # precedence weighting from previous time step
        link_matrix_old = np.random.rand(b, n, n) * (
            1 - np.tile(np.eye(5), [b, 1, 1]))  # random link matrix with diagonals zero
        link_matrix_correct = np.zeros((b, n, n))
        for k in range(b):
            for i in range(n):
                for j in range(n):
                    if i != j:
                        link_matrix_correct[k, i, j] = (1 - write_weighting[k, i] - write_weighting[k, j]) * \
                                                       link_matrix_old[k, i, j] + \
                                                       write_weighting[k, i] * precedence_weighting[k, j]

        with self.test_session():
            tf.global_variables_initializer().run()
            Memory.batch_size = b
            Memory.memory_size = n
            new_link_matrix = Memory.update_link_matrix(Memory,
                                                        tf.constant(link_matrix_old, dtype=tf.float32),
                                                        tf.constant(precedence_weighting, dtype=tf.float32),
                                                        tf.constant(write_weighting, dtype=tf.float32))
            self.assertAllClose(link_matrix_correct, new_link_matrix.eval())
Project: tf_classification    Author: visipedia    | project source | file source
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels]))
Project: discretize    Author: simpeg    | project source | file source
def closestPoints(mesh, pts, gridLoc='CC'):
    """Move a list of points to the closest points on a grid.

    :param BaseMesh mesh: The mesh
    :param numpy.ndarray pts: Points to move
    :param string gridLoc: ['CC', 'N', 'Fx', 'Fy', 'Fz', 'Ex', 'Ex', 'Ey', 'Ez']
    :rtype: numpy.ndarray
    :return: nodeInds
    """

    pts = asArray_N_x_Dim(pts, mesh.dim)
    grid = getattr(mesh, 'grid' + gridLoc)
    nodeInds = np.empty(pts.shape[0], dtype=int)

    for i, pt in enumerate(pts):
        if mesh.dim == 1:
            nodeInds[i] = ((pt - grid)**2).argmin()
        else:
            nodeInds[i] = ((np.tile(pt, (grid.shape[0], 1)) - grid)**2).sum(axis=1).argmin()

    return nodeInds
Project: PyFunt    Author: dnlcrl    | project source | file source
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_height) % stride == 0
    out_height = (H + 2 * padding - field_height) / stride + 1
    out_width = (W + 2 * padding - field_width) / stride + 1

    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)

    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k, i, j)
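tile and repeat cooperate here as well: np.repeat enumerates rows within a receptive field, while np.tile cycles columns across the field and across channels. For a 2x2 field on a single channel:

import numpy as np

field_height = field_width = 2
C = 1
i0 = np.tile(np.repeat(np.arange(field_height), field_width), C)
j0 = np.tile(np.arange(field_width), field_height * C)
print(i0)  # [0 0 1 1] -- row offset of each position in the 2x2 field
print(j0)  # [0 1 0 1] -- column offset of each position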
Project: radar    Author: amoose136    | project source | file source
def test_big_binary(self):
        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls in windows. These normally would hang doing something like this.
        See http://projects.scipy.org/numpy/ticket/1660"""
        if sys.platform != 'win32':
            return
        try:
            # before workarounds, only up to 2**32-1 worked
            fourgbplus = 2**32 + 2**16
            testbytes = np.arange(8, dtype=np.int8)
            n = len(testbytes)
            flike = tempfile.NamedTemporaryFile()
            f = flike.file
            np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
            flike.seek(0)
            a = np.fromfile(f, dtype=np.int8)
            flike.close()
            assert_(len(a) == fourgbplus)
            # check only start and end for speed:
            assert_((a[:n] == testbytes).all())
            assert_((a[-n:] == testbytes).all())
        except (MemoryError, ValueError):
            pass
Project: radar    Author: amoose136    | project source | file source
def test_two_keys_two_vars(self):
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])
        test = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
Project: snn4hrl    Author: florensacc    | project source | file source
def dist_info_sym(self, obs_var, latent_var=None):  # this is meant to be for one path!
        # for now this does nothing, and it does not work for computing the dist_info_vars of npo_snn_rewardMI
        if latent_var is None:
            latent_var1 = theano.shared(np.expand_dims(self.latent_fix, axis=0))  # new fix to avoid putting the latent as an input: just take the one fixed!
            latent_var = TT.tile(latent_var1, [obs_var.shape[0], 1])

        # generate the generalized input (append latents to obs.)
        if self.bilinear_integration:
            extended_obs_var = TT.concatenate([obs_var, latent_var,
                                               TT.flatten(obs_var[:, :, np.newaxis] * latent_var[:, np.newaxis, :],
                                                          outdim=2)]
                                              , axis=1)
        else:
            extended_obs_var = TT.concatenate([obs_var, latent_var], axis=1)
        mean_var, log_std_var = L.get_output([self._l_mean, self._l_log_std], extended_obs_var)
        if self.min_std is not None:
            log_std_var = TT.maximum(log_std_var, np.log(self.min_std))
        return dict(mean=mean_var, log_std=log_std_var)
Project: DQN    Author: Ivehui    | project source | file source
def train(self, tran, selected):
        self.targetNet.blobs['frames'].data[...] \
            = tran.frames[selected + 1].copy()
        netOut = self.targetNet.forward()

        target = np.tile(tran.reward[selected]
                         + pms.discount
                         * tran.n_last[selected]
                         * np.resize(netOut['value_q'].max(1),
                                     (pms.batchSize, 1)),
                         (pms.actionSize,)
                         ) * tran.action[selected]

        self.solver.net.blobs['target'].data[...] = target
        self.solver.net.blobs['frames'].data[...] = tran.frames[selected].copy()
        self.solver.net.blobs['filter'].data[...] = tran.action[selected].copy()
        self.solver.step(1)
Project: Generative-ConvACs    Author: HUJI-Deep    | project source | file source
def knn_masked_data(trX,trY,missing_data_dir, input_shape, k):

    raw_im_data = np.loadtxt(join(script_dir,missing_data_dir,'index.txt'),delimiter=' ',dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir,missing_data_dir,'index_mask.txt'),delimiter=' ',dtype=str)
    # Using 'brute' method since we only want to do one query per classifier
    # so this will be quicker as it avoids overhead of creating a search tree
    knn_m = KNeighborsClassifier(algorithm='brute',n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0],int(np.max(trY)+1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '), progressbar.Bar()], maxval=total_images, term_width=50).start()
    for i in range(total_images):
        mask_im=load_image(join(script_dir,missing_data_dir,raw_mask_data[i][0]), input_shape,1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps) # since mask is 1 at missing locations
        v_im=load_image(join(script_dir,missing_data_dir,raw_im_data[i][0]), input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask,(trX.shape[0],1))
        # Corrupt whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)        
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i,:] = knn_m.predict_proba(v_im.reshape(1,-1))
        pbar.update(i)
    pbar.finish()
    return prob_Y_hat
Project: Generative-ConvACs    Author: HUJI-Deep    | project source | file source
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # First figure out what the size of the output should be
    C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_height) % stride == 0
    out_height = (H + 2 * padding - field_height) / stride + 1
    out_width = (W + 2 * padding - field_width) / stride + 1

    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)

    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k, i, j)
Project: rl-attack-detection    Author: yenchenlin    | project source | file source
def __init__(self, directory, num_act, mean_path, num_threads=1, capacity=1e5, batch_size=32,
                scale=(1.0/255.0), s_t_shape=[84, 84, 4], x_t_1_shape=[84, 84, 1], colorspace='gray'):
        self.scale = scale
        self.s_t_shape = s_t_shape
        self.x_t_1_shape = x_t_1_shape

        # Load image mean
        mean = np.load(os.path.join(mean_path))

        # Prepare data flow
        s_t, a_t, x_t_1 = _read_and_decode(directory,
                                        s_t_shape=s_t_shape,
                                        num_act=num_act,
                                        x_t_1_shape=x_t_1_shape)
        self.mean = mean
        self.s_t_batch, self.a_t_batch, self.x_t_1_batch = tf.train.shuffle_batch([s_t, a_t, x_t_1],
                                                            batch_size=batch_size, capacity=capacity,
                                                            min_after_dequeue=int(capacity*0.25),
                                                            num_threads=num_threads)

        # Subtract image mean (according to J Oh design)
        self.mean_const = tf.constant(mean, dtype=tf.float32)
        print(self.mean_const.get_shape())
        self.s_t_batch = (self.s_t_batch - tf.tile(self.mean_const, [1, 1, 4])) * scale
        self.x_t_1_batch = (self.x_t_1_batch - self.mean_const) * scale
Project: isbi2017-part3    Author: learningtitans    | project source | file source
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels]))
Project: isbi2017-part3    Author: learningtitans    | project source | file source
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels]))
Project: muesr    Author: bonfus    | project source | file source
def get_op(self):
        """Returns all symmetry operations (including inversions and
        subtranslations), but unlike get_symop(), they are returned as
        two ndarrays."""
        if self.centrosymmetric:
            rot = np.tile(np.vstack((self.rotations, -self.rotations)), 
                          (self.nsubtrans, 1, 1))
            trans = np.tile(np.vstack((self.translations, -self.translations)),
                            (self.nsubtrans, 1))
            trans += np.repeat(self.subtrans, 2 * len(self.rotations), axis=0)
            trans = np.mod(trans, 1)
        else:
            rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
            trans = np.tile(self.translations, (self.nsubtrans, 1))
            trans += np.repeat(self.subtrans, len(self.rotations), axis=0)
            trans = np.mod(trans, 1)
        return rot, trans
Project: uai2017_learning_to_acquire_information    Author: evanthebouncy    | project source | file source
def get_feed_dic_obs(self, obs):
    # create all the necessary feeds
    obs_x = []
    obs_y = []
    obs_tf = []

    for _ in range(OBS_SIZE):
      obs_x.append(np.zeros([N_BATCH,L]))
      obs_y.append(np.zeros([N_BATCH,L]))
      obs_tf.append(np.zeros([N_BATCH,2]))

    num_obs = len(obs)
    for ob_idx in range(num_obs):
      ob_coord, ob_lab = obs[ob_idx]
      ob_x, ob_y = vectorize(ob_coord)
      obs_x[ob_idx] = np.tile(ob_x, [50,1])
      obs_y[ob_idx] = np.tile(ob_y, [50,1])
      obs_tf[ob_idx] = np.tile(ob_lab, [50,1])

    feed_dic = dict(zip(self.ph_obs_x + self.ph_obs_y + self.ph_obs_tf, 
                        obs_x + obs_y + obs_tf))
    return feed_dic
Project: uai2017_learning_to_acquire_information    Author: evanthebouncy    | project source | file source
def get_feed_dic_obs(self, obs):
    # create all the necessary feeds
    obs_x = []
    obs_y = []
    obs_tf = []

    for _ in range(OBS_SIZE):
      obs_x.append(np.zeros([N_BATCH,L]))
      obs_y.append(np.zeros([N_BATCH,L]))
      obs_tf.append(np.zeros([N_BATCH,2]))

    num_obs = len(obs)
    for ob_idx in range(num_obs):
      ob_coord, ob_lab = obs[ob_idx]
      ob_x, ob_y = vectorize(ob_coord)
      obs_x[ob_idx] = np.tile(ob_x, [50,1])
      obs_y[ob_idx] = np.tile(ob_y, [50,1])
      obs_tf[ob_idx] = np.tile(ob_lab, [50,1])

    feed_dic = dict(zip(self.ph_obs_x + self.ph_obs_y + self.ph_obs_tf, 
                        obs_x + obs_y + obs_tf))
    return feed_dic
Project: third_person_im    Author: bstadie    | project source | file source
def collect_trajs_for_cost(self, n_trajs, pol, env, dom, cls):
        paths = []
        #print(n_trajs)
        for iter_step in range(0, n_trajs):
            paths.append(self.cyberpunk_rollout(agent=pol, env=env, max_path_length=self.horizon,
                                                reward_extractor=None))

        data_matrix = tensor_utils.stack_tensor_list([p['im_observations'] for p in paths])
        class_matrix = np.tile(cls, (n_trajs, self.horizon, 1))
        dom_matrix = np.tile(dom, (n_trajs, self.horizon, 1))

        #data_matrix = np.zeros(shape=(n_trajs, self.horizon, self.im_height, self.im_width, self.im_channels))
        #class_matrix = np.zeros(shape=(n_trajs, self.horizon, 2))
        #dom_matrix = np.zeros(shape=(n_trajs, self.horizon, 2))
        #for path, path_step in zip(paths, range(0, len(paths))):
        #    for sub_path, time_step in zip(path['im_observations'], range(0, self.horizon)):
        #        data_matrix[path_step, time_step, :, :, :] = sub_path
        #        class_matrix[path_step, time_step, :] = path['class']
        #        dom_matrix[path_step, time_step, :] = path['dom']

        return dict(data=data_matrix, classes=class_matrix, domains=dom_matrix)
Project: HyperGAN    Author: 255BITS    | project source | file source
def sample(self, path, save_samples):
        gan = self.gan
        generator = gan.uniform_sample
        z_t = gan.uniform_encoder.sample
        x_t = gan.inputs.x

        sess = gan.session
        config = gan.config
        global x_v
        global z_v
        x_v = sess.run(x_t)
        x_v = np.tile(x_v[0], [gan.batch_size(),1,1,1])

        sample = sess.run(generator, {x_t: x_v})
        stacks = []
        bs = gan.batch_size()
        width = 5
        print(np.shape(x_v), np.shape(sample))
        stacks.append([x_v[1], sample[1], sample[2], sample[3], sample[4]])
        for i in range(bs//width-1):
            stacks.append([sample[i*width+width+j] for j in range(width)])
        images = np.vstack([np.hstack(s) for s in stacks])

        self.plot(images, path, save_samples)
        return [{'images': images, 'label': 'tiled x sample'}]
Project: iGAN    Author: junyanz    | project source | file source
def shadow_image(self, img, pos):
        if img is None:
            return None
        weighted_img = np.ones((img.shape[0], img.shape[1]), np.uint8)
        x = int(pos.x() / self.scale)
        y = int(pos.y() / self.scale)

        weighted_img[y, x] = 0
        dist_img = cv2.distanceTransform(weighted_img, distanceType=cv2.cv.CV_DIST_L2, maskSize=5).astype(np.float32)
        dist_sigma = self.img_size/2.0
        dist_img_f = np.exp(-dist_img / dist_sigma)
        dist_img_f = np.tile(dist_img_f[..., np.newaxis], [1,1,3])
        l = 0.25
        img_f = img.astype(np.float32)
        rst_f = (img_f * l + (1-l) * (img_f * dist_img_f + (1-dist_img_f)*255.0))
        rst = rst_f.astype(np.uint8)
        return rst
Project: SlidingWindowVideoTDA    Author: ctralie    | project source | file source
def CSMToBinary(D, Kappa):
    """
    Turn a cross-similarity matrix into a binary cross-similarity matrix
    If Kappa = 0, take all neighbors
    If Kappa < 1 it is the fraction of mutual neighbors to consider
    Otherwise Kappa is the number of mutual neighbors to consider
    """
    N = D.shape[0]
    M = D.shape[1]
    if Kappa == 0:
        return np.ones((N, M))
    elif Kappa < 1:
        NNeighbs = int(np.round(Kappa*M))
    else:
        NNeighbs = Kappa
    J = np.argpartition(D, NNeighbs, 1)[:, 0:NNeighbs]
    I = np.tile(np.arange(N)[:, None], (1, NNeighbs))
    V = np.ones(I.size)
    [I, J] = [I.flatten(), J.flatten()]
    ret = sparse.coo_matrix((V, (I, J)), shape=(N, M))
    return ret.toarray()
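np.tile(np.arange(N)[:, None], (1, NNeighbs)) pairs each row index with that row's NNeighbs nearest-neighbor columns before both are flattened into COO coordinates. A reduced sketch with hand-picked neighbors:

import numpy as np

N, NNeighbs = 3, 2
J = np.array([[1, 2], [0, 2], [0, 1]])             # per-row neighbor columns
I = np.tile(np.arange(N)[:, None], (1, NNeighbs))  # matching row indices
print(list(zip(I.flatten(), J.flatten())))
# index pairs (row, col): (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)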