Python numpy module: ones_like() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.ones_like().
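For orientation before the project excerpts below, here is a minimal, self-contained sketch (my own example, not taken from any of the projects) of what numpy.ones_like() does: it returns an array of ones with the same shape and dtype as its argument, and the dtype can be overridden.

import numpy as np

x = np.arange(6, dtype=np.float64).reshape(2, 3)
ones = np.ones_like(x)              # shape (2, 3), dtype float64, filled with 1.0
mask = np.ones_like(x, dtype=bool)  # dtype can be overridden, e.g. to start a boolean mask
print(ones)
print(mask)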

Project: guided-filter | Author: lisabug
def blur(I, r):
    """
    This method behaves like cv2.blur().

    Parameters
    ----------
    I: NDArray
        Filtering input
    r: int
        Radius of blur filter

    Returns
    -------
    q: NDArray
        Blurred output of I.
    """
    ones = np.ones_like(I, dtype=np.float32)
    N = box_filter(ones, r)
    ret = box_filter(I, r)
    return ret / N
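The division by N above is the usual trick for normalizing a box filter near image borders: filtering an np.ones_like array counts how much of each window lies inside the image. The following self-contained sketch (my own illustration; it substitutes scipy.ndimage.uniform_filter for the project's box_filter helper, which is not shown here) demonstrates the same pattern:

import numpy as np
from scipy.ndimage import uniform_filter

def blur_edge_normalized(I, r):
    """Box blur normalized near the borders, mimicking blur() above."""
    box = lambda a: uniform_filter(a, size=2 * r + 1, mode='constant')  # zero-padded window mean
    ones = np.ones_like(I, dtype=np.float32)
    N = box(ones)                          # fraction of each window lying inside the image
    return box(I.astype(np.float32)) / N   # ratio recovers the mean over valid pixels only

img = np.random.rand(8, 8).astype(np.float32)
out = blur_edge_normalized(img, r=2)
assert out.shape == img.shape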
Project: segmentation_DLMI | Author: imatge-upc
def load_ROI_mask(self):

        proxy = nib.load(self.FLAIR_FILE)
        image_array = np.asarray(proxy.dataobj)

        mask = np.ones_like(image_array)
        mask[np.where(image_array < 90)] = 0

        # img = nib.Nifti1Image(mask, proxy.affine)
        # nib.save(img, join(modalities_path,'mask.nii.gz'))

        struct_element_size = (20, 20, 20)
        mask_augmented = np.pad(mask, [(21, 21), (21, 21), (21, 21)], 'constant', constant_values=(0, 0))
        mask_augmented = binary_closing(mask_augmented, structure=np.ones(struct_element_size, dtype=bool)).astype(
            np.int)

        return mask_augmented[21:-21, 21:-21, 21:-21].astype('bool')
Project: dsb3 | Author: EliasVansteenkiste
def make_3d_mask(img_shape, center, radius, shape='sphere'):
    mask = np.zeros(img_shape)
    radius = np.rint(radius)
    center = np.rint(center)
    sz = np.arange(int(max(center[0] - radius, 0)), int(max(min(center[0] + radius + 1, img_shape[0]), 0)))
    sy = np.arange(int(max(center[1] - radius, 0)), int(max(min(center[1] + radius + 1, img_shape[1]), 0)))
    sx = np.arange(int(max(center[2] - radius, 0)), int(max(min(center[2] + radius + 1, img_shape[2]), 0)))
    sz, sy, sx = np.meshgrid(sz, sy, sx)
    if shape == 'cube':
        mask[sz, sy, sx] = 1.
    elif shape == 'sphere':
        distance2 = ((center[0] - sz) ** 2
                     + (center[1] - sy) ** 2
                     + (center[2] - sx) ** 2)
        distance_matrix = np.ones_like(mask) * np.inf
        distance_matrix[sz, sy, sx] = distance2
        mask[(distance_matrix <= radius ** 2)] = 1
    elif shape == 'gauss':
        z, y, x = np.ogrid[:mask.shape[0], :mask.shape[1], :mask.shape[2]]
        distance = ((z - center[0]) ** 2 + (y - center[1]) ** 2 + (x - center[2]) ** 2)
        mask = np.exp(- 1. * distance / (2 * radius ** 2))
        mask[(distance > 3 * radius ** 2)] = 0
    return mask
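A hypothetical usage sketch (my own, assuming only numpy and the make_3d_mask function above): build a spherical mask and compare the voxel count with the analytic sphere volume.

import numpy as np

mask = make_3d_mask((40, 40, 40), center=(20, 20, 20), radius=6, shape='sphere')
print(mask.sum(), 4.0 / 3.0 * np.pi * 6 ** 3)  # voxel count should be close to the sphere volume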
Project: sef | Author: passalis
def sim_target_fixed(target_data, target_labels, sigma, idx, target_params):
    """
    Sets the target to a fixed similarity between all the training samples
    :param target_data: (not used)
    :param target_labels: (not used)
    :param sigma: (not used)
    :param idx: indices of the data samples used to compute the similarity matrix
    :param target_params: expected to contain the 'target_value' entry
    :return: the similarity matrix and the corresponding mask
    """
    if 'target_value' not in target_params:
        target_params['target_value'] = 0.0

    Gt = np.ones((len(idx), len(idx)))
    Gt = Gt * target_params['target_value']
    Gt_mask = np.ones_like(Gt)

    return np.float32(Gt), np.float32(Gt_mask)
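A hypothetical call (my own illustration, not project code): with five sample indices and an empty target_params dict, the target value defaults to 0.0 and the mask built with np.ones_like is all ones.

import numpy as np

Gt, Gt_mask = sim_target_fixed(None, None, None, idx=np.arange(5), target_params={})
print(Gt.shape, Gt_mask.min())  # (5, 5) 1.0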
Project: em_examples | Author: geoscixyz
def getCylinderPoints(xc,zc,r):
    xLocOrig1 = np.arange(-r,r+r/10.,r/10.)
    xLocOrig2 = np.arange(r,-r-r/10.,-r/10.)
    # Top half of cylinder
    zLoc1 = np.sqrt(-xLocOrig1**2.+r**2.)+zc
    # Bottom half of cylinder
    zLoc2 = -np.sqrt(-xLocOrig2**2.+r**2.)+zc
    # Shift from x = 0 to xc
    xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
    xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)

    topHalf = np.vstack([xLoc1,zLoc1]).T
    topHalf = topHalf[0:-1,:]
    bottomhalf = np.vstack([xLoc2,zLoc2]).T
    bottomhalf = bottomhalf[0:-1,:]

    cylinderPoints = np.vstack([topHalf,bottomhalf])
    cylinderPoints = np.vstack([cylinderPoints,topHalf[0,:]])
    return cylinderPoints
Project: radar | Author: amoose136
def test_basic(self):
        dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
               np.longdouble, np.clongdouble]
        for dt in dts:
            c = np.ones(53, dtype=np.bool)
            assert_equal(np.where( c, dt(0), dt(1)), dt(0))
            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
            d = np.ones_like(c).astype(dt)
            e = np.zeros_like(d)
            r = d.astype(dt)
            c[7] = False
            r[7] = e[7]
            assert_equal(np.where(c, e, e), e)
            assert_equal(np.where(c, d, e), r)
            assert_equal(np.where(c, d, e[0]), r)
            assert_equal(np.where(c, d[0], e), r)
            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
Project: snn4hrl | Author: florensacc
def plot_series(series):
    plt.figure(1)
    # colors = [np.array([1, 0.1, 0.1]), np.array([0.1, 1, 0.1]), np.array([0.1, 0.1, 1])]
    colors = ['m', 'g', 'r', 'b', 'y']
    for i, s in enumerate(series):
        print(s['x'], s['y'], s['std'], s['label'])
        small_number = np.ones_like(s['x']) * (s['x'][1]*0.1)
        x_axis = np.where(s['x'] == 0, small_number, s['x'])
        plt.plot(x_axis, s['y'], color=colors[i], label=s['label'])
        plt.fill_between(x_axis, s['y'] - s['std'], s['y'] + s['std'], color=colors[i], alpha=0.2)
    plt.semilogx()
    plt.xlabel('MI reward bonus')
    plt.ylabel('Final intrinsic reward')
    plt.title('Final intrinsic reward in pointMDP with 10 good modes')
    plt.legend(loc='best')
    plt.show()
Project: snn4hrl | Author: florensacc
def predict(self, path):
        if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
            expanded_path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
        else:  # when it comes from log_diagnostics it's already expanded (or if it was never aggregated)
            expanded_path = path

        bonus = self.visitation_bonus * self.predict_count(expanded_path) + \
                self.dist_from_reset_bonus * self.predict_dist_from_reset(expanded_path)
        if self.snn_H_bonus:  # I need the if because the snn bonus is only available when there are latents
            bonus += self.snn_H_bonus * self.predict_entropy(expanded_path)

        total_bonus = bonus + self.survival_bonus * np.ones_like(bonus)

        if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
            aggregated_bonus = []
            full_path_rewards = path['env_infos']['full_path']['rewards']
            total_steps = 0
            for sub_rewards in full_path_rewards:
                aggregated_bonus.append(np.sum(total_bonus[total_steps:total_steps + len(sub_rewards)]))
                total_steps += len(sub_rewards)
            total_bonus = aggregated_bonus

        return np.array(total_bonus)
Project: niwqg | Author: crocha700
def _initialize_filter(self):

        """Set up spectral filter or dealiasing."""

        if self.use_filter:
            cphi=0.65*pi
            wvx=np.sqrt((self.k*self.dx)**2.+(self.l*self.dy)**2.)
            self.filtr = np.exp(-23.6*(wvx-cphi)**4.)
            self.filtr[wvx<=cphi] = 1.
            self.logger.info(' Using filter')
        elif self.dealias:
            self.filtr = np.ones_like(self.wv2)
            self.filtr[self.nx//3:2*self.nx//3,:] = 0.
            self.filtr[:,self.ny//3:2*self.ny//3] = 0.
            self.logger.info(' Dealiasing with 2/3 rule')
        else:
            self.filtr = np.ones_like(self.wv2)
            self.logger.info(' No dealiasing; no filter')
Project: niwqg | Author: crocha700
def _initialize_filter(self):

        """ Set up spectral filter or dealiasing."""

        if self.use_filter:
            cphi=0.65*pi
            wvx=np.sqrt((self.k*self.dx)**2.+(self.l*self.dy)**2.)
            self.filtr = np.exp(-23.6*(wvx-cphi)**4.)
            self.filtr[wvx<=cphi] = 1.
            self.logger.info(' Using filter')
        elif self.dealias:
            self.filtr = np.ones_like(self.wv2)
            self.filtr[self.nx//3:2*self.nx//3,:] = 0.
            self.filtr[:,self.ny//3:2*self.ny//3] = 0.
            self.logger.info(' Dealiasing with 2/3 rule')
        else:
            self.filtr = np.ones_like(self.wv2)
            self.logger.info(' No dealiasing; no filter')
Project: trappist1 | Author: rodluger
def flux(self, time, planets = None, cadence = 'lc'):
    '''
    Compute the model flux at the given times as the product of the transit
    models of the requested planets.
    '''

    # Ensure it's a list
    if planets is None:
      planets = self.planets
    elif type(planets) is str:
      planets = [planets]

    # Compute flux for each planet
    flux = np.ones_like(time)
    for planet in planets:
      if cadence == 'lc':
        model = ps.Transit(per = self.period[planet], b = self.b[planet], RpRs = self.RpRs[planet], t0 = self.t0[planet], 
                           rhos = self.rhos, ecc = self.ecc[planet], w = self.w[planet] * np.pi / 180., u1 = self.u1, 
                           u2 = self.u2, times = self.times[planet])
      else:
        model = ps.Transit(per = self.period[planet], b = self.b[planet], RpRs = self.RpRs[planet], t0 = self.t0[planet], 
                           rhos = self.rhos, ecc = self.ecc[planet], w = self.w[planet] * np.pi / 180., u1 = self.u1, 
                           u2 = self.u2, times = self.times[planet], exptime = ps.KEPSHRTCAD)
      flux *= model(time)

    return flux
Project: ngraph | Author: NervanaSystems
def test_flat_tensor_dot_tensor():
    """
    Ensure that a flattened argument axis is not unflattened in the result.

    """
    H = ng.make_axis(2)
    W = ng.make_axis(7)
    C = ng.make_axis(3)
    K = ng.make_axis(11)

    axes_a = ng.make_axes([H, W, C])
    a = ng.constant(np.ones(axes_a.lengths), axes=axes_a)
    flat_a = ng.flatten_at(a, 2)

    axes_b = ng.make_axes([C, K])
    b = ng.constant(np.ones(axes_b.lengths), axes=axes_b)

    result = ng.dot(b, flat_a)

    with ExecutorFactory() as factory:
        result_fun = factory.executor(result)
        result_val = result_fun()

    result_correct = np.ones_like(result_val) * C.length
    ng.testing.assert_allclose(result_val, result_correct)
Project: ngraph | Author: NervanaSystems
def __call__(self, input_data, weights):
        '''
        input_data in this case is a numpy array with batch_size on axis 1
        and weights is a matrix with 1 column
        '''
        if self.state is None:
            self.state = np.ones_like(weights)

        if self.velocity is None:
            self.velocity = np.zeros_like(weights)

        gradient = - input_data.mean(axis=1)

        self.state[:] = self.decay_rate * self.state + \
            (1.0 - self.decay_rate) * np.square(gradient)

        self.velocity = self.velocity * self.momentum + \
            self.learning_rate * gradient / np.sqrt(self.state + self.epsilon) + \
            self.learning_rate * self.wdecay * weights
        weights[:] = weights - self.velocity

        return weights
Project: 3D_Dense_Transformer_Networks | Author: JohnYC1995
def Affine_test(self,N,sizex,sizey,sizez,times,stop_time,typeofT,colors):
        for i in range(times):
            # Theta: random affine coefficients
            idx = np.random.uniform(-1, 1)
            idy = np.random.uniform(-1, 1)
            idz = np.random.uniform(-1, 1)
            swithx = np.random.uniform(0, 1)
            swithy = np.random.uniform(0, 1)
            swithz = np.random.uniform(0, 1)
            rotatex = np.random.uniform(-1, 1)
            rotatey = np.random.uniform(-1, 1)
            rotatez = np.random.uniform(-1, 1)
            cx = np.array([idx, rotatey, rotatez, swithx])
            cy = np.array([rotatex, idy, rotatez, swithy])
            cz = np.array([rotatex, rotatey, idz, swithz])
            # Source grid
            x = np.linspace(-sizex, sizex, N)
            y = np.linspace(-sizey, sizey, N)
            z = np.linspace(-sizez, sizez, N)
            x, y, z = np.meshgrid(x, y, z)
            xgs, ygs, zgs = x.flatten(), y.flatten(), z.flatten()
            gps = np.vstack([xgs, ygs, zgs, np.ones_like(xgs)]).T
            # Transform
            xgt = np.dot(gps, cx)
            ygt = np.dot(gps, cy)
            zgt = np.dot(gps, cz)
            # display
            showIm = ShowImage()
            showIm.Show_transform(xgs,ygs,zgs,xgt,ygt,zgt,sizex,sizey,sizez,stop_time,typeofT,N,colors)
Project: tango | Author: LLNL
def pseudo_call_gene_low_level(simulationTime=None, rho=None, temperatureHat=None, densityHat=None, safetyFactor=None, 
                        ionMass=1, ionCharge=1, Lref=None, Bref=None, rhoStar=None, Tref=None, nref=None, checkpointSuffix=0):
    """Function  to emulate a call to GENE with the same input arguments and return values.

    Used for testing other code when the overhead of an actual startup of GENE is not needed.  Of course, this can only
    be used to test correctness of syntax, not correctness of values.
    """

    # check inputs have been provided
    for var in (simulationTime, rho, temperatureHat, densityHat, safetyFactor, Lref, Bref, rhoStar, Tref, nref):
        if var is None:
            #logging.error("Input variables must be provided in call_gene_low_level.")
            raise ValueError

    MPIrank = 1
    dVdxHat = np.ones_like(rho)
    sqrt_gxx = np.ones_like(rho)
    avgParticleFluxHat = np.ones_like(rho)
    avgHeatFluxHat = np.ones_like(rho)
    temperatureOutput = np.ones_like(rho)
    densityOutput = np.ones_like(rho)
    return (MPIrank, dVdxHat, sqrt_gxx, avgParticleFluxHat, avgHeatFluxHat, temperatureOutput, densityOutput)
Project: tango | Author: LLNL
def _add_noise(v, amplitude):
        """Add noise to an array v in the following way:.

                    noisy_v = (1+h) * v

        where h is a random noise with specified standard deviation.  The noise h is trimmed to 
        be zero close to both boundaries.

        Inputs:
          v              input to add noise to (array)           
          ampltitude     specified standard deviation of noise (scalar)
          tac            autocorrelation time measured in discrete samples (scalar)
        Outputs:
          noisy_v        v with noise
        """
        # generate noise that is constant throughout space
        h  = np.random.normal(scale=amplitude) * np.ones_like(v)  
        # dampen the sides of the noise close to the boundaries
        h = dampen_sides(h)
        noisy_v = (1 + h) * v
        return noisy_v
Project: tango | Author: LLNL
def __call__(self, t, x, n):
        # Define the contributions to the H coefficients for the Shestakov Problem
        H1 = np.ones_like(x)
        #H7 = shestakov_nonlinear_diffusion.H7contrib_Source(x)
        H7 = source(x)
        (H2turb, H3, extradata) = self.turbhandler.Hcontrib_turbulent_flux(n)
        H4 = None
        H6 = None
        # add "other" diffusive contributions by specifying a diffusivity, H2 = V'D [but V' = 1 here]
        H2constdiff = 0.03

        def diffusivity_right(x):
            diffusivity = np.zeros_like(x)
            xr = 0.85
            D0 = 7
            diffusivity[x > xr] = D0
            return diffusivity

        H2 = H2turb + H2constdiff    
        #H2 = H2turb + H2constdiff + diffusivity_right(x)   # if adding const to right edge
        return (H1, H2, H3, H4, H6, H7, extradata)
Project: tango | Author: LLNL
def __call__(self, t, x, n):
        # Define the contributions to the H coefficients for the Shestakov Problem
        H1 = np.ones_like(x)
        #H7 = shestakov_nonlinear_diffusion.H7contrib_Source(x)
        H7 = source(x)
        (H2turb, H3, extradata) = self.turbhandler.Hcontrib_turbulent_flux(n)
        H4 = None
        H6 = None
        # add "other" diffusive contributions by specifying a diffusivity, H2 = V'D [but V' = 1 here]
        H2constdiff = 0.03

        def diffusivity_right(x):
            diffusivity = np.zeros_like(x)
            xr = 0.85
            D0 = 7
            diffusivity[x > xr] = D0
            return diffusivity

        H2 = H2turb + H2constdiff    
        #H2 = H2turb + H2constdiff + diffusivity_right(x)   # if adding const to right edge
        return (H1, H2, H3, H4, H6, H7, extradata)
Project: tango | Author: LLNL
def setup_parameters_different_grids_tango_inside():
    # set up radial grids with Tango's outer radial boundary radially inward of that of GENE.
    simulationTime = 0.4
    Lref = 1.65
    Bref = 2.5
    majorRadius = 1.65
    minorRadius = 0.594
    rhoStar = 1/140
    checkpointSuffix = 999

    numRadialPtsTango = 100
    numRadialPtsGene = 80
    rhoTango = np.linspace(0.1, 0.8, numRadialPtsTango)      # rho = r/a
    rhoGene = np.linspace(0.2, 0.9, numRadialPtsGene)

    rTango = rhoTango * minorRadius    # physical radius r
    rGene = rhoGene * minorRadius
    safetyFactorGeneGrid = tango.parameters.analytic_safety_factor(rGene, minorRadius, majorRadius)

    e = 1.60217662e-19          # electron charge
    temperatureGeneGrid = 1000 * e * np.ones_like(rGene)
    densityTangoGrid = 1e19 * np.ones_like(rTango)
    densityGeneGrid = 1e19 * np.ones_like(rGene)
    gridMapper = tango.interfacegrids_gene.GridInterfaceTangoInside(rTango, rGene)
    return (simulationTime, rTango, rGene, temperatureGeneGrid, densityTangoGrid, densityGeneGrid, safetyFactorGeneGrid, Lref, Bref, majorRadius, minorRadius, rhoStar, gridMapper, checkpointSuffix)
Project: tango | Author: LLNL
def setup_parameters_different_grids_tango_outside():
    # set up radial grids with Tango's outer radial boundary radially outward of that of GENE.
    simulationTime = 0.4
    Lref = 1.65
    Bref = 2.5
    majorRadius = 1.65
    minorRadius = 0.594
    rhoStar = 1/140
    checkpointSuffix = 999

    numRadialPtsTango = 100
    numRadialPtsGene = 80
    rhoTango = np.linspace(0.1, 0.9, numRadialPtsTango)      # rho = r/a
    rhoGene = np.linspace(0.2, 0.7, numRadialPtsGene)

    rTango = rhoTango * minorRadius    # physical radius r
    rGene = rhoGene * minorRadius
    safetyFactorGeneGrid = tango.parameters.analytic_safety_factor(rGene, minorRadius, majorRadius)

    e = 1.60217662e-19          # electron charge
    temperatureGeneGrid = 1000 * e * np.ones_like(rGene)
    densityTangoGrid = 1e19 * np.ones_like(rTango)
    densityGeneGrid = 1e19 * np.ones_like(rGene)
    gridMapper = tango.interfacegrids_gene.GridInterfaceTangoOutside(rTango, rGene)
    return (simulationTime, rTango, rGene, temperatureGeneGrid, densityTangoGrid, densityGeneGrid, safetyFactorGeneGrid, Lref, Bref, majorRadius, minorRadius, rhoStar, gridMapper, checkpointSuffix)
Project: wendy | Author: jobovy
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
        cnt+= 1
    return None
Project: wendy | Author: jobovy
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,omega=omega)
    E= wendy.energy(x,v,m,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m,omega=omega)-E) < 10.**-10., "Energy not conserved during simple N-body integration with external harmonic potential"
        cnt+= 1
    return None
Project: wendy | Author: jobovy
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration"
        cnt+= 1
    return None
Project: wendy | Author: jobovy
def test_notracermasses():
    # approx should work with tracer sheets
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    m[N//2:]= 0.
    m*= 2.
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=1000)
    E= wendy.energy(x,v,m)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration with some tracer particles"
        cnt+= 1
    return None
Project: wendy | Author: jobovy
def test_energy_conservation_sech2disk_manyparticles():
    # Test that energy is conserved for a self-gravitating disk
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,omega=omega,approx=True,nleap=1000)
    E= wendy.energy(x,v,m,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        assert numpy.fabs(wendy.energy(tx,tv,m,omega=omega)-E)/E < 10.**-6., "Energy not conserved during approximate N-body integration with external harmonic potential"
        cnt+= 1
    return None
Project: wendy | Author: jobovy
def test_againstexact_sech2disk_manyparticles():
    # Test that the exact N-body and the approximate N-body agree
    N= 101
    totmass= 1.
    sigma= 1.
    zh= 2.*sigma**2./totmass
    x= numpy.arctanh(2.*numpy.random.uniform(size=N)-1)*zh
    v= numpy.random.normal(size=N)*sigma
    v-= numpy.mean(v) # stabilize
    m= numpy.ones_like(x)/N*(1.+0.1*(2.*numpy.random.uniform(size=N)-1))
    omega= 1.1
    g= wendy.nbody(x,v,m,0.05,approx=True,nleap=2000,omega=omega)
    ge= wendy.nbody(x,v,m,0.05,omega=omega)
    cnt= 0
    while cnt < 100:
        tx,tv= next(g)
        txe,tve= next(ge)
        assert numpy.all(numpy.fabs(tx-txe) < 10.**-5.), "Exact and approximate N-body give different positions"
        assert numpy.all(numpy.fabs(tv-tve) < 10.**-5.), "Exact and approximate N-body give different velocities"
        cnt+= 1
    return None
Project: algorithm-reference-library | Author: SKA-ScienceDataProcessor
def gain_substitution_scalar(gain, x, xwt):
    nants, nchan, nrec, _ = gain.shape
    newgain = numpy.ones_like(gain, dtype='complex')
    gwt = numpy.zeros_like(gain, dtype='float')

    # We are going to work with Jones 2x2 matrix formalism so everything has to be
    # converted to that format
    x = x.reshape(nants, nants, nchan, nrec, nrec)
    xwt = xwt.reshape(nants, nants, nchan, nrec, nrec)

    for ant1 in range(nants):
        for chan in range(nchan):
            # Loop over e.g. 'RR', 'LL' or 'XX', 'YY', ignoring cross terms
            top = numpy.sum(x[:, ant1, chan, 0, 0] *
                            gain[:, chan, 0, 0] * xwt[:, ant1, chan, 0, 0], axis=0)
            bot = numpy.sum((gain[:, chan, 0, 0] * numpy.conjugate(gain[:, chan, 0, 0]) *
                             xwt[:, ant1, chan, 0, 0]).real, axis=0)

            if bot > 0.0:
                newgain[ant1, chan, 0, 0] = top / bot
                gwt[ant1, chan, 0, 0] = bot
            else:
                newgain[ant1, chan, 0, 0] = 0.0
                gwt[ant1, chan, 0, 0] = 0.0
    return newgain, gwt
Project: l1l2py | Author: slipguru
def test_balance_weights(self):
        labels = [1, 1, -1, -1, -1]
        predictions = [-1, -1, 1, 1, 1]  # all errors
        default_weights = np.abs(center(np.asarray(labels)))

        exp_error = balanced_classification_error(labels, predictions)
        error = balanced_classification_error(labels, predictions, default_weights)
        assert_equals(exp_error, error)

        null_weights = np.ones_like(labels)
        exp_error = classification_error(labels, predictions)
        error = balanced_classification_error(labels, predictions, null_weights)
        assert_equals(exp_error, error)

        # Balanced classes
        labels = [1, 1, 1, -1, -1, -1]
        predictions = [-1, -1, -1, 1, 1, 1]  # all errors
        exp_error = classification_error(labels, predictions)
        error = balanced_classification_error(labels, predictions)
        assert_equals(exp_error, error)
Project: l1l2py | Author: slipguru
def test_balance_weights():
    """Test balanced classification error with custom weights."""
    labels = [1, 1, -1, -1, -1]
    predictions = [-1, -1, 1, 1, 1] # all errors
    default_weights = np.abs(center(np.asarray(labels)))

    exp_error = balanced_classification_error(labels, predictions)
    error = balanced_classification_error(labels, predictions, default_weights)
    assert_equals(exp_error, error)

    null_weights = np.ones_like(labels)
    exp_error = classification_error(labels, predictions)
    error = balanced_classification_error(labels, predictions, null_weights)
    assert_equals(exp_error, error)

    # Balanced classes
    labels = [1, 1, 1, -1, -1, -1]
    predictions = [-1, -1, -1, 1, 1, 1] # all errors
    exp_error = classification_error(labels, predictions)
    error = balanced_classification_error(labels, predictions)
    assert_equals(exp_error, error)
Project: marseille | Author: vene
def initialize_labels(self, Y):

        y_nodes_flat = [y_val for y in Y for y_val in y.nodes]
        y_links_flat = [y_val for y in Y for y_val in y.links]
        self.prop_encoder_ = LabelEncoder().fit(y_nodes_flat)
        self.link_encoder_ = LabelEncoder().fit(y_links_flat)

        self.n_prop_states = len(self.prop_encoder_.classes_)
        self.n_link_states = len(self.link_encoder_.classes_)

        self.prop_cw_ = np.ones_like(self.prop_encoder_.classes_,
                                     dtype=np.double)
        self.link_cw_ = compute_class_weight(self.class_weight,
                                             self.link_encoder_.classes_,
                                             y_links_flat)

        self.link_cw_ /= self.link_cw_.min()

        logging.info('Setting node class weights {}'.format(", ".join(
            "{}: {}".format(lbl, cw) for lbl, cw in zip(
                self.prop_encoder_.classes_, self.prop_cw_))))

        logging.info('Setting link class weights {}'.format(", ".join(
            "{}: {}".format(lbl, cw) for lbl, cw in zip(
                self.link_encoder_.classes_, self.link_cw_))))
Project: dl4nlp | Author: yohokuno
def get_adagrad(learning_rate=0.5):
    """
    Adaptive Subgradient Methods for Online Learning and Stochastic Optimization
    John Duchi, Elad Hazan and Yoram Singer, Journal of Machine Learning Research 12 (2011) 2121-2159
    http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
    """
    sum_square_gradient = None

    def adagrad(gradient):
        nonlocal sum_square_gradient

        if sum_square_gradient is None:
            sum_square_gradient = np.ones_like(gradient)
        sum_square_gradient += gradient ** 2
        return learning_rate / np.sqrt(sum_square_gradient)

    return adagrad
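A brief usage sketch of the closure above (my own example): note that sum_square_gradient is seeded with np.ones_like rather than zeros, so the first step divides by sqrt(1 + g**2) and later steps return ever smaller per-parameter rates.

import numpy as np

adagrad = get_adagrad(learning_rate=0.5)
g = np.array([0.1, -2.0, 0.5])
print(adagrad(g))  # first step: 0.5 / sqrt(1 + g**2)
print(adagrad(g))  # second step: smaller, since squared gradients have accumulated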
Project: importance-sampling | Author: idiap
def load_data(self):
        # Create the data using magic numbers to approximate the figure in
        # canevet_icml2016
        x = np.linspace(0, 1, self.N).astype(np.float32)
        ones = np.ones_like(x).astype(int)
        boundary = np.sin(4*(x + 0.5)**5)/3 + 0.5

        data = np.empty(shape=[self.N, self.N, 3], dtype=np.float32)
        data[:, :, 0] = 1-x
        for i in range(self.N):
            data[i, :, 1] = 1-x[i]
            data[i, :, 2] = 1 / (1 + np.exp(self.smooth*(x - boundary[i])))
            data[i, :, 2] = np.random.binomial(ones, data[i, :, 2])
        data = data.reshape(-1, 3)
        np.random.shuffle(data)

        # Create train and test arrays
        split = int(len(data)*self.test_split)
        X_train = data[:-split, :2]
        y_train = data[:-split, 2]
        X_test = data[-split:, :2]
        y_test = data[-split:, 2]

        return (X_train, y_train), (X_test, y_test)
Project: importance-sampling | Author: idiap
def __init__(self, dataset, reweighting, model, large_batch=1024,
                 forward_batch_size=128, steps_per_epoch=300, recompute=2,
                 s_e=(1, 1), n_epochs=1):
        super(OnlineBatchSelectionSampler, self).__init__(
            dataset,
            reweighting,
            model,
            large_batch=large_batch,
            forward_batch_size=forward_batch_size
        )

        # The configuration of OnlineBatchSelection
        self.steps_per_epoch = steps_per_epoch
        self.recompute = recompute
        self.s_e = s_e
        self.n_epochs = n_epochs

        # Mutable variables to be updated
        self._batch = 0
        self._epoch = 0
        self._raw_scores = np.ones((len(dataset.train_data),))
        self._scores = np.ones_like(self._raw_scores)
        self._ranks = np.arange(len(dataset.train_data))
Project: importance-sampling | Author: idiap
def load_data(self):
        # Create the data using magic numbers to approximate the figure in
        # canevet_icml2016
        x = np.linspace(0, 1, self.N).astype(np.float32)
        ones = np.ones_like(x).astype(int)
        boundary = np.sin(4*(x + 0.5)**5)/3 + 0.5

        data = np.empty(shape=[self.N, self.N, 3], dtype=np.float32)
        data[:, :, 0] = 1-x
        for i in range(self.N):
            data[i, :, 1] = 1-x[i]
            data[i, :, 2] = 1 / (1 + np.exp(self.smooth*(x - boundary[i])))
            data[i, :, 2] = np.random.binomial(ones, data[i, :, 2])
        data = data.reshape(-1, 3)
        np.random.shuffle(data)

        # Create train and test arrays
        split = int(len(data)*self.test_split)
        X_train = data[:-split, :2]
        y_train = data[:-split, 2]
        X_test = data[-split:, :2]
        y_test = data[-split:, 2]

        return (X_train, y_train), (X_test, y_test)
Project: importance-sampling | Author: idiap
def __init__(self, dataset, reweighting, model, large_batch=1024,
                 forward_batch_size=128, steps_per_epoch=300, recompute=2,
                 s_e=(1, 1), n_epochs=1):
        super(OnlineBatchSelectionSampler, self).__init__(
            dataset,
            reweighting,
            model,
            large_batch=large_batch,
            forward_batch_size=forward_batch_size
        )

        # The configuration of OnlineBatchSelection
        self.steps_per_epoch = steps_per_epoch
        self.recompute = recompute
        self.s_e = s_e
        self.n_epochs = n_epochs

        # Mutable variables to be updated
        self._batch = 0
        self._epoch = 0
        self._raw_scores = np.ones((len(dataset.train_data),))
        self._scores = np.ones_like(self._raw_scores)
        self._ranks = np.arange(len(dataset.train_data))
Project: importance-sampling | Author: idiap
def load_data(self):
        # Create the data using magic numbers to approximate the figure in
        # canevet_icml2016
        x = np.linspace(0, 1, self.N).astype(np.float32)
        ones = np.ones_like(x).astype(int)
        boundary = np.sin(4*(x + 0.5)**5)/3 + 0.5

        data = np.empty(shape=[self.N, self.N, 3], dtype=np.float32)
        data[:, :, 0] = 1-x
        for i in range(self.N):
            data[i, :, 1] = 1-x[i]
            data[i, :, 2] = 1 / (1 + np.exp(self.smooth*(x - boundary[i])))
            data[i, :, 2] = np.random.binomial(ones, data[i, :, 2])
        data = data.reshape(-1, 3)
        np.random.shuffle(data)

        # Create train and test arrays
        split = int(len(data)*self.test_split)
        X_train = data[:-split, :2]
        y_train = data[:-split, 2]
        X_test = data[-split:, :2]
        y_test = data[-split:, 2]

        return (X_train, y_train), (X_test, y_test)
Project: importance-sampling | Author: idiap
def load_data(self):
        # Create the data using magic numbers to approximate the figure in
        # canevet_icml2016
        x = np.linspace(0, 1, self.N).astype(np.float32)
        ones = np.ones_like(x).astype(int)
        boundary = np.sin(4*(x + 0.5)**5)/3 + 0.5

        data = np.empty(shape=[self.N, self.N, 3], dtype=np.float32)
        data[:, :, 0] = 1-x
        for i in range(self.N):
            data[i, :, 1] = 1-x[i]
            data[i, :, 2] = 1 / (1 + np.exp(self.smooth*(x - boundary[i])))
            data[i, :, 2] = np.random.binomial(ones, data[i, :, 2])
        data = data.reshape(-1, 3)
        np.random.shuffle(data)

        # Create train and test arrays
        split = int(len(data)*self.test_split)
        X_train = data[:-split, :2]
        y_train = data[:-split, 2]
        X_test = data[-split:, :2]
        y_test = data[-split:, 2]

        return (X_train, y_train), (X_test, y_test)
Project: importance-sampling | Author: idiap
def __init__(self, dataset, reweighting, model, large_batch=1024,
                 forward_batch_size=128, steps_per_epoch=300, recompute=2,
                 s_e=(1, 1), n_epochs=1):
        super(OnlineBatchSelectionSampler, self).__init__(
            dataset,
            reweighting,
            model,
            large_batch=large_batch,
            forward_batch_size=forward_batch_size
        )

        # The configuration of OnlineBatchSelection
        self.steps_per_epoch = steps_per_epoch
        self.recompute = recompute
        self.s_e = s_e
        self.n_epochs = n_epochs

        # Mutable variables to be updated
        self._batch = 0
        self._epoch = 0
        self._raw_scores = np.ones((len(dataset.train_data),))
        self._scores = np.ones_like(self._raw_scores)
        self._ranks = np.arange(len(dataset.train_data))
Project: wtte-rnn | Author: ragulpr
def get_data(discrete_time):
    y_test, y_train, u_train = generate_weibull(A=real_a,
                                                B=real_b,
                                                # <np.inf -> impose censoring
                                                C=censoring_point,
                                                shape=[n_sequences,
                                                       n_timesteps, 1],
                                                discrete_time=discrete_time)
    # With random input it _should_ learn weight 0
    x_train = x_test = np.random.uniform(
        low=-1, high=1, size=[n_sequences, n_timesteps, n_features])

    # y_test is uncensored data
    y_test = np.append(y_test, np.ones_like(y_test), axis=-1)
    y_train = np.append(y_train, u_train, axis=-1)
    return y_train, x_train, y_test, x_test
Project: npstreams | Author: LaurentRDC
def test_ignore_nans(self):
        """ Test that NaNs are ignored. """
        source = [np.ones((16,), dtype = np.float) for _ in range(10)]
        source.append(np.full_like(source[0], np.nan))
        product = cprod(source, ignore_nan = True)
        self.assertTrue(np.allclose(product, np.ones_like(product)))
Project: npstreams | Author: LaurentRDC
def test_dtype(self):
        """ Test that dtype argument is working """
        source = [np.ones((16,), dtype = np.float) for _ in range(10)]
        product = cprod(source, dtype = np.int)
        self.assertTrue(np.allclose(product, np.ones_like(product)))
        self.assertEqual(product.dtype, np.int)
Project: npstreams | Author: LaurentRDC
def test_trivial(self):
        """ Test a product of ones """
        source = [np.ones((16,), dtype = np.float) for _ in range(10)]
        product = last(iprod(source))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
Project: npstreams | Author: LaurentRDC
def test_ignore_nans(self):
        """ Test that NaNs are ignored. """
        source = [np.ones((16,), dtype = np.float) for _ in range(10)]
        source.append(np.full_like(source[0], np.nan))
        product = last(iprod(source, ignore_nan = True))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
Project: npstreams | Author: LaurentRDC
def test_dtype(self):
        """ Test that dtype argument is working """
        source = [np.ones((16,), dtype = np.float) for _ in range(10)]
        product = last(iprod(source, dtype = np.int))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
        self.assertEqual(product.dtype, np.int)
Project: npstreams | Author: LaurentRDC
def test_trivial(self):
        """ Test a product of ones """
        source = [np.ones((16,), dtype = np.float) for _ in range(10)]
        product = last(inanprod(source))
        self.assertTrue(np.allclose(product, np.ones_like(product)))
Project: tissue_analysis | Author: VirtualPlants
def composed_triangular_mesh(triangular_mesh_dict):
    start_time = time()
    print "--> Composing triangular mesh..."

    mesh = TriangularMesh()

    triangle_cell_matching = {}

    mesh_points = np.concatenate([triangular_mesh_dict[c].points.keys() for c in triangular_mesh_dict.keys()])
    mesh_point_positions = np.concatenate([triangular_mesh_dict[c].points.values() for c in triangular_mesh_dict.keys()])
    mesh.points = dict(zip(mesh_points,mesh_point_positions))

    mesh_triangles = np.concatenate([triangular_mesh_dict[c].triangles.values() for c in triangular_mesh_dict.keys()])
    mesh.triangles = dict(zip(np.arange(len(mesh_triangles)),mesh_triangles))

    mesh_cells = np.concatenate([c*np.ones_like(triangular_mesh_dict[c].triangles.keys()) for c in triangular_mesh_dict.keys()])
    triangle_cell_matching = dict(zip(np.arange(len(mesh_triangles)),mesh_cells))


    # for c in triangular_mesh_dict.keys():
    #     cell_start_time = time()

    #     cell_mesh = triangular_mesh_dict[c]
    #     # mesh_point_max_id = np.max(mesh.points.keys()) if len(mesh.points)>0 else 0
    #     mesh.points.update(cell_mesh.points)

    #     if len(cell_mesh.triangles)>0:
    #         mesh_triangle_max_id = np.max(mesh.triangles.keys()) if len(mesh.triangles)>0 else 0
    #         mesh.triangles.update(dict(zip(list(np.array(cell_mesh.triangles.keys())+mesh_triangle_max_id),cell_mesh.triangles.values())))
    #         triangle_cell_matching.update(dict(zip(list(np.array(cell_mesh.triangles.keys())+mesh_triangle_max_id),[c for f in cell_mesh.triangles]))) 

    #     cell_end_time = time()
    #     print "  --> Adding cell ",c," (",len(cell_mesh.triangles)," triangles )    [",cell_end_time-cell_start_time,"s]"

    end_time = time()
    print "<-- Composing triangular mesh     [",end_time-start_time,"]"
    return mesh, triangle_cell_matching
Project: search-MjoLniR | Author: wikimedia
def _loess_predict(X, y_tr, X_pred, bandwidth):
    X_tr = np.column_stack((np.ones_like(X), X))
    X_te = np.column_stack((np.ones_like(X_pred), X_pred))
    y_te = []
    for x in X_te:
        ws = np.exp(-np.sum((X_tr - x)**2, axis=1) / (2 * bandwidth**2))
        W = scipy.sparse.dia_matrix((ws, 0), shape=(X_tr.shape[0],) * 2)
        theta = np.linalg.pinv(X_tr.T.dot(W.dot(X_tr))).dot(X_tr.T.dot(W.dot(y_tr)))
        y_te.append(np.dot(x, theta))
    return np.array(y_te)
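A hypothetical smoke test (my own; it assumes numpy and scipy.sparse are imported as in the original module): fit a noisy line with the locally weighted regression above and evaluate it on a coarse grid.

import numpy as np

X = np.linspace(0.0, 1.0, 50)
y = 2.0 * X + 0.1 * np.random.randn(50)
y_hat = _loess_predict(X, y, X_pred=np.linspace(0.0, 1.0, 5), bandwidth=0.3)
print(y_hat)  # roughly [0, 0.5, 1.0, 1.5, 2.0]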
Project: AutoSleepScorerDev | Author: skjerns
def __init__(self, X, Y, batch_size, cropsize=0):

        assert len(X) == len(Y), 'X and Y must be the same length {}!={}'.format(len(X),len(Y))
        print('starting balanced generator')
        self.X = X
        self.Y = Y
        self.cropsize=cropsize
        self.batch_size = int(batch_size)
        self.pmatrix = np.ones_like(self.Y)
        self.reset()
Project: seqhawkes | Author: mlukasik
def gamma_fullsum_grad(
    gamma,
    node_vec,
    eventmemes,
    etimes,
    T,
    mu,
    alpha,
    omega,
    W,
    beta,
    kernel_evaluate,
    K_evaluate,
    ):
    '''
    Note: this actually returns the negated gradient.
    '''

    gradres = np.ones_like(gamma) * -T * np.sum(mu)
    for (eventidx, (etime1, infected_u, eventmeme)) in \
        enumerate(izip(etimes, node_vec, eventmemes)):
        gradres[eventmeme] += mu[infected_u] \
            / np.exp(event_nonapproximated_logintensity(
            infected_u,
            eventmeme,
            etime1,
            T,
            etimes[:eventidx],
            node_vec[:eventidx],
            eventmemes[:eventidx],
            mu,
            gamma,
            omega,
            alpha,
            kernel_evaluate,
            ))
    return -gradres


# =====
Project: Optimizer-cotw | Author: alkaya
def relu(x, deriv=False):
        '''
        Rectifier function
        :param x: np.array
        :param deriv: derivative wanted?
        :return:
        '''
        if deriv:
            return np.ones_like(x) * (x > 0)

        return x * (x > 0)
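A brief usage example (my own): relu() returns the rectified values, and deriv=True uses np.ones_like to build the element-wise subgradient used during backpropagation.

import numpy as np

x = np.array([-2.0, 0.0, 3.0])
print(relu(x))              # negative inputs clipped to zero -> [-0.  0.  3.]
print(relu(x, deriv=True))  # subgradient: 0 where x <= 0, 1 where x > 0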