Python numpy.random module: normal() example source code

We extracted the following 38 code examples from open-source Python projects to illustrate how to use numpy.random.normal().
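As a baseline before the project excerpts, here is a minimal, self-contained sketch of the call itself (illustrative, not taken from any of the projects below). numpy.random.normal(loc, scale, size) draws samples from a Gaussian with mean loc and standard deviation scale:

import numpy as np

# one sample from the standard normal N(0, 1)
x = np.random.normal()

# 5 samples from N(0, 1); `scale` is the standard deviation, not the variance
samples = np.random.normal(loc=0.0, scale=1.0, size=5)

# a 3x2 array of samples from N(10, 2)
arr = np.random.normal(10.0, 2.0, size=(3, 2))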

Project: math_stat_python    Author: Sammers21    | Project source | File source
def test(m=mean, c=count_of_elements):
    global h_one_true, h_two_true
    # the two counters below always sum to 10000 (one increment per trial)
    h_one_true = 0
    h_two_true = 0
    for i in range(10000):
        # draw c samples from the normal distribution N(m, sigma)
        array_of_elements_from_distribution = l.normal(m, sigma, c)
        average = sum(array_of_elements_from_distribution) / c
        # confidence-interval bounds from the 0.025 Gaussian quantile (two-sided 95%)
        right = average + (cvantil0025gaussian * sigma / n.sqrt(c))
        left = average - (cvantil0025gaussian * sigma / n.sqrt(c))
        if left < mean < right:
            h_one_true += 1
        else:
            h_two_true += 1
    return h_one_true, h_two_true
Project: Safe-RL-Benchmark    Author: befelix    | Project source | File source
def map(self, state):
        """Compute output in session.

        Make sure a default session is set when calling.
        """
        state = state.flatten()
        assert(self.state_space.contains(state))

        if self.sess is None:
            sess = tf.get_default_session()
        else:
            sess = self.sess
        mean, var = sess.run([self.a_pred, self.var], {self.X: [state]})

        # note: numpy.random.normal's second argument is a standard deviation, not a variance
        action = np.array(normal(mean, var))
        action = action.reshape(self.action_space.shape)

        return action
Project: XTREE    Author: ai-se    | Project source | File source
def patchIt(i,testInst, config=False):
    # 1. Find where t falls
    C = changes() # Record changes
    testInst = pd.DataFrame(testInst).transpose()
    current = i.find(testInst, i.tree)
    node = current
    while node.lvl > -1:
      node = node.up  # Move to tree root

    leaves = flatten([i.leaves(_k) for _k in node.kids])
    try:
      if i.config:
        best = sorted([l for l in leaves if l.score<current.score], key=lambda F: i.howfar(current,F))[0]
      else:
        best = sorted([l for l in leaves if l.score<=0.01*current.score], key=lambda F: i.howfar(current,F))[0]
    except:
      return testInst.values.tolist()[0]

    def new(old, range):
      # radius to the nearest bound of the range (unused by the active return)
      rad = abs(min(range[1]-old, old-range[0]))
      # return randn(old, rad) if rad else old
      # return uniform(old-rad,rad+old)
      return uniform(range[0],range[1])

    for ii in best.branch:
      before = testInst[ii[0]]
      if ii not in current.branch:
        then = testInst[ii[0]].values[0]
        now = ii[1] if i.config else new(testInst[ii[0]].values[0], ii[1])
        # print(current.branch,best.branch)
        testInst[ii[0]] = now
        C.save(name=ii[0], old=then, new=now)

    testInst[testInst.columns[-1]] = None
    i.change.append(C.log)
    return testInst.values.tolist()[0]
Project: iutils    Author: inconvergent    | Project source | File source
def random_unit_vec(num, scale):
  from numpy import reshape
  from numpy.linalg import norm
  from numpy.random import normal

  # normalize each Gaussian row vector to unit length, then scale
  rnd = normal(size=(num,3))
  d = norm(rnd,axis=1)
  rnd[:] /= reshape(d, (num,1))
  return rnd*scale
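A quick illustrative call (not part of the project, and assuming the function above plus numpy imported as np) shows the invariant: every row of the result has Euclidean norm equal to scale.

import numpy as np

vecs = random_unit_vec(4, 2.0)
print(np.linalg.norm(vecs, axis=1))  # ~ [2. 2. 2. 2.]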
Project: Video-Classification-Action-Recognition    Author: qijiezhao    | Project source | File source
def _get_orthogonal_init_weights(weights):
    fan_out = weights.size(0)
    fan_in = weights.size(1) * weights.size(2)*weights.size(3)*weights.size(4)

    u, _, v = svd(normal(0.0, 0.01, (fan_out, fan_in)), full_matrices=False)

    if u.shape == (fan_out, fan_in):
        return torch.Tensor(u.reshape(weights.size()))
    else:
        return torch.Tensor(v.reshape(weights.size()))
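An illustrative use, under the assumption that this initializes 5-D convolution weights (e.g. torch.nn.Conv3d) and that the excerpt's svd/normal imports are in scope; since svd returns orthonormal factors, the 0.01 scale of the Gaussian does not affect the result:

import torch

# hypothetical 5-D conv weight: 8 output channels, 4 input channels, 3x3x3 kernel
w = torch.empty(8, 4, 3, 3, 3)
w_init = _get_orthogonal_init_weights(w)  # same shape; orthonormal rows when flattened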
Project: bpy_lambda    Author: bcongdon    | Project source | File source
def skewedGauss(mu, sigma, bounds, upperSkewed=True):
    raw = gauss(mu, sigma)

    # Quicker to check an extra condition than do unnecessary math. . . .
    if raw < mu and not upperSkewed:
        out = ((mu - bounds[0]) / (3 * sigma)) * raw + ((mu * (bounds[0] - (mu - 3 * sigma))) / (3 * sigma))
    elif raw > mu and upperSkewed:
        out = ((mu - bounds[1]) / (3 * -sigma)) * raw + ((mu * (bounds[1] - (mu + 3 * sigma))) / (3 * -sigma))
    else:
        out = raw

    return out


# @todo create a def for generating an alpha and beta for a beta distribution
#   given a mu, sigma, and an upper and lower bound.  This proved faster in
#   profiling in addition to providing a much better distribution curve
#   provided multiple iterations happen within this function; otherwise it was
#   slower.
#   This might be scrapped because of the bounds placed on mu and sigma:
#
#   For alpha > 1 and beta > 1:
#   mu^2 - mu^3           mu^3 - mu^2 + mu
#   ----------- < sigma < ----------------
#      1 + mu                  2 - mu
#
##def generateBeta(mu, sigma, scale, repetitions=1):
##    results = []
##
##    return results
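For the record, a hedged sketch of the @todo above, using standard moment matching for a Beta distribution on [0, 1] (the helper name and signature below are invented, not the project's):

from random import betavariate

def generate_beta(mu, sigma, scale=1.0, repetitions=1):
    # moment matching: mu = a/(a+b), sigma^2 = a*b / ((a+b)^2 * (a+b+1)),
    # which requires sigma^2 < mu*(1 - mu)
    nu = mu * (1.0 - mu) / sigma ** 2 - 1.0
    a, b = mu * nu, (1.0 - mu) * nu
    return [scale * betavariate(a, b) for _ in range(repetitions)]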

# Creates rock objects:
Project: django-celery-rabbitmq-example    Author: Giangblackk    | Project source | File source
def fft_random(n):
    for i in range(n):
        x = random.normal(0, 0.1, 2000)
        y = fft(x)
        if i % 30 == 0:
            process_percent = int(100 * float(i)/float(n))
            current_task.update_state(state='PROGRESS',
                meta={'process_percent': process_percent})
    return random.random()
Project: django-celery-rabbitmq-example    Author: Giangblackk    | Project source | File source
def test(tid, n):
    for i in range(n):
        x = random.normal(0, 0.1, 2000)
        y = fft(x)
    result = dict()
    result['x'] = random.random()
    result['y'] = random.randint(100)
    return result


# @task_success.connect(sender='celeryapp.tasks.fft_random')
Project: riemann_workshop    Author: madedotcom    | Project source | File source
def get_page_latency():
    return normal(page_latency_mean, page_latency_stddev, 1)[0]
Project: riemann_workshop    Author: madedotcom    | Project source | File source
def get_asset_latency():
    return normal(asset_latency_mean, asset_latency_stddev, 1)[0]
Project: isp-data-pollution    Author: essandess    | Project source | File source
def diurnal_cycle_test(self):
        now = dt.datetime.now()
        tmhr = now.hour + now.minute/60.
        phase = npr.normal(14.,1.)
        exponent = min(0.667,self.chi2_mean_std(0.333,0.1))
        def cospow(x,e):  # flattened cosine with e < 1
            c = np.cos(x)
            return np.sign(c) * np.power(np.abs(c), e)
        diurn = max(0.,0.5*(1.+cospow((tmhr-phase)*(2.*np.pi/24.),exponent)))
        flr = min(0.1,self.chi2_mean_std(0.02,0.002))
        val = flr + (1.-flr)*diurn
        return npr.uniform() < val
Project: Fluid2d    Author: pvthinker    | Project source | File source
def __init__(self,param,grid):
        self.list_param=['sizevar']
        param.copy(self,self.list_param)

        self.list_param=['nh','msk','fill_halo']
        grid.copy(self,self.list_param)

        self.intensity=1e-1
        self.gamma = 1e-1
        self.t=0

        self.forc = random.normal(size=self.sizevar)*self.msk
        self.fill_halo(self.forc)
Project: Fluid2d    Author: pvthinker    | Project source | File source
def add_forcing(self,x,t,dxdt):
        """ add the forcing term on x[0]=the vorticity """

        dt = t-self.t
        self.t=t

        self.forc = (1-self.gamma*dt)*self.forc + dt*self.gamma * random.normal(size=self.sizevar)*self.msk
        self.fill_halo(self.forc)

        dxdt[0] += self.intensity * self.forc
Project: optnet    Author: locuslab    | Project source | File source
def sample(args):
    nBps = npr.randint(args.minBps, args.maxBps)
    bpLocs = [0] + sorted(npr.choice(args.seqLen-2, nBps-1, replace=False)+1) + [args.seqLen]
    bpDiffs = np.diff(bpLocs)
    heights = npr.randint(args.minHeight, args.maxHeight, nBps)
    Y = []
    for d, h in zip(bpDiffs, heights):
        Y += [h]*d
    Y = np.array(Y, dtype=float)

    X = Y + npr.normal(0, args.noise, (args.seqLen))
    return X, Y
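An illustrative driver for sample() (the argument values are made up, and the excerpt's np/npr imports are assumed): it yields a noisy piecewise-constant sequence X alongside its clean target Y.

from argparse import Namespace

# hypothetical hyperparameters, not from the project
args = Namespace(minBps=2, maxBps=5, seqLen=100,
                 minHeight=0, maxHeight=10, noise=0.5)
X, Y = sample(args)  # X.shape == Y.shape == (100,)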
Project: Kaleido    Author: vacancy    | Project source | File source
def conv(name, src, cout, cin, k, p=0, s=1, wstd=0.01):
    W = opr.parameter(npr.normal(scale=wstd, size=(cout, cin, k, k)).astype('float32'), name=name + ':W')
    b = opr.parameter(np.zeros(shape=(1, cout, 1, 1), dtype='float32'), name=name + ':b')
    y = opr.conv2d(src, W, padding=p, stride=s, name=name) + b
    return opr.tanh(y)
Project: Kaleido    Author: vacancy    | Project source | File source
def fc(name, src, cout, cin, wstd=0.01, nonlin=True):
    W = opr.parameter(npr.normal(scale=wstd, size=(cin, cout)).astype('float32'), name=name + ':W')
    b = opr.parameter(np.zeros(shape=(1, cout), dtype='float32'), name=name + ':b')
    y = opr.matmul(src, W) + b
    return opr.tanh(y) if nonlin else y
Project: robotics1project    Author: pchorak    | Project source | File source
def test():
    n = 20 # number of simulated measurements

    # Hidden parameters
    p0a = np.array([[215.0],[0],[-20]]) # mm
    ptc = np.array([[25.0],[10],[-30]]) # mm
    Rtc = math3D.rot([1.0,0.1,0.2],180) # deg

    # Simulate measurements
    angle_list = []
    pca_list = []
    for k in range(n):
        angles = (rng.normal(0,45),60*rng.rand(),60*rng.rand())
        # Dobot kinematics
        Rt0 = np.transpose(DobotModel.R0T(angles))
        p0t = np.transpose(np.matrix(DobotModel.forward_kinematics(angles)))
        # calculate camera vector
        pca = np.transpose(Rtc)*(Rt0*(p0a - p0t) - ptc)
        pca = pca + rng.normal(0,0.5,[3,1]) # add noise (mm)
        # add to lists
        angle_list.append(angles)
        pca_list.append(pca)

    # Estimate transformation
    (ptc_est,Rtc_est,p0a_est) = get_pose(angle_list,pca_list)
    ptc_est[2] = ptc_est[2] + p0a[2] - p0a_est[2] # could use all elements if confident about p0a
    print "ptc"
    print ptc
    print "ptc (estimate)"
    print ptc_est
    print "Rtc"
    print Rtc
    print "Rtc (estimate)"
    print Rtc_est
Project: pynamd    Author: radakb    | Project source | File source
def __init__(self, nstates=2, max_samples=10, klow=1.0e-1, khi=1.0e1,
                 randseed=None, sample_fudge=0.0, unsampled_states=0):
        self.max_samples = int(max_samples)
        self.nstates = int(nstates)
        # Randomize the HO parameters.
        seed(randseed)
        klow, khi = float(klow), float(khi)
        #spacing = uniform(self.nstates, size=self.nstates)
        #k = klow*(khi / klow)**(spacing / self.nstates)
        # k = uniform(float(klow), float(khi), size=self.nstates)
        k = klow + (khi - klow)*exponential(1.0, self.nstates)
        sigma = sqrt(1/k)
        x0 = uniform(-0.5*sigma.max(), 0.5*sigma.max(), size=self.nstates)
        # Choose which states to sample from.
        nsampled_states = self.nstates - int(unsampled_states)
        sampled_indices = choice(arange(self.nstates), nsampled_states, False)
        sampled_indices.sort()
        # Generate samples up to max.
        x_in = normal(0.0, 1.0, (nsampled_states, self.max_samples))
        x_in *= sigma[sampled_indices, newaxis]
        x_in += x0[sampled_indices, newaxis]
        self.data_size = zeros(self.nstates, int32) 
        self.data_size[sampled_indices] += self.max_samples
        # Randomly remove samples for uneven sampling.  Note that at least one
        # state must remain the same, otherwise max_samples is incorrect.
        # Also, we don't actually have to do anything to the actual samples, because
        # the sample size is used as a mask!
        #
        del_max = int(sample_fudge*self.max_samples + 0.5) + 1
        if del_max > 1:
            sample_shift = randint(0, del_max, nsampled_states)
            if all(sample_shift > 0): # Randomly reset the shift for a state.
                sample_shift[choice(arange(nsampled_states))] = 0
            self.data_size[sampled_indices] -= sample_shift
        self.unsampled_indices = where(self.data_size == 0)[0]
        # Compute the energy in all states
        u_ijn = 0.5*(k[:, newaxis]*(x_in[:, newaxis, :] - x0[:, newaxis])**2)
        self.u_ijn = u_ijn
        self.f_actual = 0.5*log(k / k[0])[1:]
        self.x0 = x0
        self.x_jn = x_in
Project: piradar    Author: scivision    | Project source | File source
def sim_iono(tx,fs,dist_m,codelen,Nstd,Ajam,station_id,usefilter,outpath,verbose):
    awgn = (normal(scale=Nstd, size=tx.size) + 1j*normal(scale=Nstd, size=tx.size))

    jam = Ajam * waveform_to_file(station_id+1, codelen, filt=usefilter, outpath=outpath,verbose=verbose)

    # delay transmit signal and add undesired signals
    tdelay_sec = 2*dist_m / c
    print(f'refl. height {dist_m/1e3} km -> delay {tdelay_sec:.3e} sec')

    rx = delayseq(tx,tdelay_sec,fs) + awgn + jam

    return rx
Project: dynamic-walking    Author: stephane-caron    | Project source | File source
def estimate(self, dt, real, cur_est, noise_intensity):
        """
        Update an estimation under noise and delays.

        Parameters
        ----------
        dt : scalar
            Time since last estimation (usually one control cycle).
        real : array
            Ground-truth coordinates.
        cur_est : array
            Current estimation.
        noise_intensity : scalar
            Intensity of noise signal in [m] / [s].

        Returns
        -------
        estimate : array
            New estimate.
        """
        Delta = cur_est - real
        delay = Delta * exp(-dt / self.delay) if self.delay > 1e-4 else 0.
        if noise_intensity < 1e-4:
            return real + delay
        sigma = noise_intensity * dt
        noise = random.normal(0., sigma, size=real.shape)
        return real + delay + noise
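Standing alone, the model reads: the previous estimation error decays toward zero with time constant delay, and fresh Gaussian noise of standard deviation noise_intensity * dt is added. A worked example with made-up numbers (the class wiring is omitted):

import numpy as np
from numpy import exp, random

dt, delay, noise_intensity = 0.005, 0.1, 0.02  # illustrative values
real = np.array([0.0, 0.0, 0.8])
cur_est = np.array([0.01, -0.02, 0.81])

Delta = cur_est - real              # current estimation error
decayed = Delta * exp(-dt / delay)  # error shrinks by exp(-dt/delay)
noise = random.normal(0., noise_intensity * dt, size=real.shape)
new_est = real + decayed + noise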
Project: dynamic-walking    Author: stephane-caron    | Project source | File source
def __update_zmp(self, target, dt):
        dz = self.zmp_state.p - target
        delay = dz * exp(-dt / self.zmp_delay) if self.zmp_delay > 1e-4 else 0.
        if self.zmp_noise < 1e-4:
            self.zmp_state.set_pos(target + delay)
            return
        sigma = self.zmp_noise * dt
        noise = normal(0., sigma, size=target.shape)
        self.zmp_state.set_pos(target + delay + noise)
Project: kerpy    Author: oxmlcs    | Project source | File source
def VaryDimension(num_samples, dimension = 5):
        Xmean = zeros(dimension)
        Xcov = identity(dimension)
        data_x = multivariate_normal(Xmean, Xcov, num_samples)
        data_z = transpose([normal(0,1,num_samples)])
        data_y = 20*sin(4*pi*(data_x[:,[0]]**2 + data_x[:,[1]]**2)) + data_z
        return data_x,data_y
Project: kerpy    Author: oxmlcs    | Project source | File source
def SimpleLn(num_samples, dimension = 5):
        Xmean = zeros(dimension)
        Xcov = identity(dimension)
        data_x = multivariate_normal(Xmean, Xcov, num_samples)
        data_z = transpose([normal(0,1,num_samples)])
        data_y = data_x[:,[0]] + data_z
        return data_x, data_y
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def setUp(self):

        import matplotlib as mpl
        mpl.rcdefaults()

        n = 100
        with tm.RNGContext(42):
            gender = np.random.choice(['Male', 'Female'], size=n)
            classroom = np.random.choice(['A', 'B', 'C'], size=n)

            self.hist_df = DataFrame({'gender': gender,
                                      'classroom': classroom,
                                      'height': random.normal(66, 4, size=n),
                                      'weight': random.normal(161, 32, size=n),
                                      'category': random.randint(4, size=n)})

        self.mpl_le_1_2_1 = plotting._mpl_le_1_2_1()
        self.mpl_ge_1_3_1 = plotting._mpl_ge_1_3_1()
        self.mpl_ge_1_4_0 = plotting._mpl_ge_1_4_0()
        self.mpl_ge_1_5_0 = plotting._mpl_ge_1_5_0()

        if self.mpl_ge_1_4_0:
            self.bp_n_objects = 7
        else:
            self.bp_n_objects = 8
        if self.mpl_ge_1_5_0:
            # 1.5 added PolyCollections to legend handler
            # so we have twice as many items.
            self.polycollection_factor = 2
        else:
            self.polycollection_factor = 1
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def test_boxplot_subplots_return_type(self):
        df = self.hist_df

        # normal style: return_type=None
        result = df.plot.box(subplots=True)
        self.assertIsInstance(result, np.ndarray)
        self._check_box_return_type(result, None, expected_keys=[
                                    'height', 'weight', 'category'])

        for t in ['dict', 'axes', 'both']:
            returned = df.plot.box(return_type=t, subplots=True)
            self._check_box_return_type(
                returned, t,
                expected_keys=['height', 'weight', 'category'],
                check_ax_title=False)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def test_series_groupby_plotting_nominally_works(self):
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(['male', 'female'], size=n)

        weight.groupby(gender).plot()
        tm.close()
        height.groupby(gender).hist()
        tm.close()
        # Regression test for GH8733
        height.groupby(gender).plot(alpha=0.5)
        tm.close()
Project: ademxapp    Author: itijyou    | Project source | File source
def Lighting(alpha_std, eig_val, eig_vec):
    def _impl(data):
        if alpha_std == 0:
            return data
        data = data.astype(np.single, copy=False)
        alpha = npr.normal(0, alpha_std, 3)
        rgb = (eig_vec * alpha.reshape((1,3)) * eig_val.reshape((1,3))).sum(1).astype(np.single)
        data += rgb.reshape((1,1,3))
        data /= 1. + alpha_std
        return data
    return _impl

# Blend the two and save into data1
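An illustrative application of Lighting (assuming the excerpt's np/npr imports are in scope); the eigenvalues and eigenvectors below are the commonly used ImageNet RGB PCA statistics, supplied here only as plausible inputs:

eig_val = np.array([0.2175, 0.0188, 0.0045])
eig_vec = np.array([[-0.5675,  0.7192,  0.4009],
                    [-0.5808, -0.0045, -0.8140],
                    [-0.5836, -0.6948,  0.4203]])

augment = Lighting(alpha_std=0.1, eig_val=eig_val, eig_vec=eig_vec)
image = np.zeros((224, 224, 3), dtype=np.single)  # hypothetical HxWx3 image
augmented = augment(image)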
Project: particle    Author: qrqiuren    | Project source | File source
def __init__(self, smp, dt, nparts, neff, initdoa, initvel, initstd,
                 noisestd, expo):
        """
        Parameters
        ----------
        smp : SpectrumSampler
            Sampler of spatial spectrum.
        dt : float
            Time interval of time steps.
        nparts : int
            Number of particles to be drawn by the particle filter.
        neff : int
            The threshold of effective particles. Should be less than `nparts`.
        initdoa : float
            Initial DOA angle.
        initvel : float
            Initial velocity of `initdoa`.
        initstd : float
            Standard deviation of initial particles.
        noisestd : float
            Standard deviation of noise.
        expo : float
            Exponent used when computing the likelihood of each particle.
        """
        self.smp = smp

        self.A = np.array([[1, dt], [0, 1]])
        self.B = np.array([dt * dt / 2, dt])

        self.nparts = nparts
        self.neff = neff

        self.noisestd = noisestd

        self.expo = expo

        # Draw particles and set initial weight
        x0 = np.array([initdoa, initvel])
        self.x = np.tile(x0, (nparts, 1)).T + \
            (normal(0., initstd, (nparts, 2)) * self.B).T
        self.w = np.ones((nparts,)) / nparts
Project: pyculib    Author: numba    | Project source | File source
def create_array(dtype, shape, slices=None, empty=False):
    """Create a test array of the given dtype and shape.
    If slices is given, the returned array aliases a bigger parent array
    using the specified start and step values. (The stop member is expected to
    be appropriate to yield the given length.)"""

    from numpy.random import normal, seed
    seed(1234)

    def total_size(s):
        # this function doesn't support slices whose members are 'None'
        return s.start + (s.stop - s.start)*np.abs(s.step)

    if not slices:
        a = np.empty(dtype=dtype, shape=shape)
    else:
        if type(shape) is not tuple: # 1D
            pshape = total_size(slices)
        else:
            pshape = tuple([total_size(s) for s in slices])
        parent = np.empty(dtype=dtype, shape=pshape)
        a = parent[slices]

    if not empty:
        mult = np.array(1, dtype=dtype)
        a[:] = normal(0.,1.,shape).astype(dtype) * mult
    return a
Project: klusta    Author: kwikteam    | Project source | File source
def artificial_waveforms(n_spikes=None, n_samples=None, n_channels=None):
    # TODO: more realistic waveforms.
    return .25 * nr.normal(size=(n_spikes, n_samples, n_channels))
Project: klusta    Author: kwikteam    | Project source | File source
def artificial_features(*args):
    return .25 * nr.normal(size=args)
Project: klusta    Author: kwikteam    | Project source | File source
def artificial_traces(n_samples, n_channels):
    # TODO: more realistic traces.
    return .25 * nr.normal(size=(n_samples, n_channels))
Project: libSigNetSim    Author: vincent-noel    | Project source | File source
def applyNoiseToData(self):

        for i, experimental_data in enumerate(self.listOfExperimentalData.keys()):

            t_experimental_data = self.listOfExperimentalData[experimental_data]

            t_filtered_t = []
            t_filtered_values = []

            # Filter some time points to impose a sampling
            if self.sampling is not None:
                for j,t_time in enumerate(t_experimental_data.t):

                    if not (abs((float(t_time)/self.sampling - round(float(t_time)/self.sampling, 0))) > 1e-12 and float(t_time) > 0):
                        t_filtered_t.append(float(t_time))
                        t_filtered_values.append(float(t_experimental_data.values[j]))

            else:
                t_filtered_t = t_experimental_data.t
                t_filtered_values = t_experimental_data.values

            # Add noise to all variables
            if self.noise > 0:
                for j, value in enumerate(t_filtered_values):
                    noise = exp(normal(0, self.noise))
                    t_filtered_values[j] = t_filtered_values[j] * noise

            self.listOfExperimentalData[experimental_data].size = len(t_filtered_t)
            self.listOfExperimentalData[experimental_data].t = t_filtered_t
            self.listOfExperimentalData[experimental_data].values = t_filtered_values
Project: dict_based_learning    Author: tombosc    | Project source | File source
def __init__(self, vocabulary_size, features_size, markov_order, 
                 temperature=1.0, pc_double_meaning=0.2, markov_order_dict=1,
                 min_len_definitions=4, max_len_definitions=8):
        """
        markov_order: integer at least 1 such that
            p(x_t|x_t-1:x_1) = p(x_t|x_t-1:x_t-markov_order)
        temperature: temperature for softmax
        pc_double_meaning: percent of the tokens that have 2 different possible
                           feature vectors, i.e. 2 meanings
        """
        assert(features_size >= 1)
        self.mo = markov_order
        self.V = vocabulary_size
        self.T = temperature
        self.mo_d = markov_order_dict # markov order for the dictionary definitions
        self.min_len_def = min_len_definitions
        self.max_len_def = max_len_definitions

        self.params = normal(0,1,(self.mo * features_size, self.V))
        self.params_d = normal(0,1,(self.mo_d * features_size, self.V))
        # trying to regulate the norm of the features.
        self.features = normal(0,1/(2*np.log(features_size)),
                              (self.V, features_size))

        self.features_size = features_size

        # tokens are composed of a..z letters 
        alphabet = ''.join([chr(c) for c in range(97, 97+26)]) # str(a..z)
        # tokens all have the same size tok_len
        self.tok_len = int(np.log(vocabulary_size) / np.log(len(alphabet)) + 1)
        n_homonyms = int(self.V * pc_double_meaning)
        # enumerate all the tokens
        self.vocabulary = []
        for i, tok in zip(range(self.V - n_homonyms),
                          itertools.product(alphabet, repeat=self.tok_len)):
            self.vocabulary.append(''.join(tok))
        for i in range(n_homonyms):
            tok = np.random.choice(self.V - n_homonyms)
            self.vocabulary.append(self.vocabulary[tok])

        # create definitions and fill the dictionary
        self.dictionary = {}
        for i in range(self.V - n_homonyms):
            tok = self.vocabulary[i]
            self.dictionary[tok] = [self.define_token(i)]
        for i in range(self.V - n_homonyms, self.V):
            tok = self.vocabulary[i]
            self.dictionary[tok].append(self.define_token(i))

        self.initial_features = normal(0,1,(self.mo, features_size))
        self.initial_features_d = normal(0,1,(self.mo_d, features_size))
Project: slitSpectrographBlind    Author: aasensio    | Project source | File source
def seeing(d_over_r0, npix=256, nterms=15, level=None, quiet=False, tiptilt=True):
  """
  Returns the wavefront resulting from a realization of the seeing

  Args:
      d_over_r0 (real): D/r0
      npix (int, optional): number of pixels
      nterms (int, optional): number of terms to include
      level (real, optional): precision level on the wavefront. This sets the number of terms
      quiet (bool, optional): verbose

  Returns:
      real: wavefront
  """
  scale = pow(d_over_r0,5.0/3.0)

  if level:
    narr = numpy.arange(400,dtype='d') + 2
    coef = numpy.sqrt(0.2944*scale*(numpy.power((narr-1),-0.866) - numpy.power(narr,-0.866)))
    wh = numpy.where(coef < level)
    n = wh[0][0]
    norder = int(ceil(sqrt(2*n)-0.5))
    nterms = norder*(norder+1)//2
    if (nterms < 15):
      nterms = 15

  wf = numpy.zeros((npix,npix),dtype='d')

  if (nterms == 0):
    return wf

  resid = numpy.zeros(nterms,dtype='d')
  coeff = numpy.zeros(nterms,dtype='d')

  resid[0:10] = [1.030,0.582,0.134,0.111,0.088,0.065,0.059,0.053,0.046,0.040]

  if (nterms > 10):
    for i in range(10,nterms):
      resid[i] = 0.2944*pow(i+1,-0.866)

  for j in range(2,nterms+1):
    coeff[j-1] = sqrt((resid[j-2]-resid[j-1])*scale)
    if not tiptilt and j <= 3:
      coeff[j-1] = 0.0
    wf += coeff[j-1]*normal()*zernike(j,npix=npix)

  if not quiet:
    print( "Computed Zernikes to term %d and RMS %f" % (nterms,coeff[nterms-1]))

  return wf
Project: EndemicPy    Author: j-i-l    | Project source | File source
def __init__(self, n=None, method='stub', **distribution):
        """
            Possible arguments for the distribution are:
            - network_type: specify the type of network that should be constructed (THIS IS MANDATORY).
                It can either be the name of a distribution or of a certain network type.

            ['l_partition', 'poisson', 'normal', 'binomial', 'exponential', 'geometric', 'gamma', 'power', 'weibull']
            For specific parameters of the distributions, see:
                http://docs.scipy.org/doc/numpy/reference/routines.random.html

            - method: The probabilistic framework after which the network will be constructed.
            - distribution specific arguments. Check out the description of the specific numpy
                function. Or just give the argument network_type and look at what the error tells you.

           see self._create_graph for more information
        """
        _Graph.__init__(self)
        self.is_static = True
        self._rewiring_attempts = 100000
        self._stub_attempts = 100000
        self.permitted_types = allowed_dists + ["l_partition", 'full']
        self.is_directed = False
        # to do: pass useful info in here.
        self._info = {}
        # for now only undirected networks
        if n is not None:
            self.n = n
            if method in ['proba', 'stub']:
                self.method = method
            else:
                raise ValueError(method + ' is not a permitted method! Choose either "proba" or "stub"')
            try:
                self.nw_name = distribution.pop('network_type')
                empty_graph = False
            except KeyError:
                self.nn = []
                self._convert_to_array()
                empty_graph = True
                #create an empty graph if network_type is not given
            if not empty_graph:
                if self.nw_name not in self.permitted_types:
                    raise ValueError(
                        "The specified network type \"%s\" is not permitted. \
                        Please choose from " % self.nw_name + '[' + ', '.join(self.permitted_types) + ']')
                self.distribution = distribution
                self._create_graph(**self.distribution)
Project: particle    Author: qrqiuren    | Project source | File source
def salphas_cplx(alpha, gamma, size=None):
    """
    Generate complex random variables under S-alpha-S distribution.

    Please check the reference paper for further details on algorithms and
    symbols.

    Parameters
    ----------
    alpha : float
        Alpha coefficient (characteristic exponent) of S-alpha-S distribution.
    gamma : float
        Gamma coefficient (dispersion parameter) of S-alpha-S distribution.
    size : tuple of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
        samples are drawn. If not indicated, a complex value will be returned.

    Returns
    -------
    x : complex
        A complex scalar, or an array of shape `size`, containing samples of
        the distribution.

    Reference
    ---------
    Tsakalides, P., and Nikias C. L., "The robust covariation-based MUSIC
    (ROC-MUSIC) algorithm for bearing estimation in impulsive noise
    environments", IEEE Transactions on Signal Processing,
    Jul. 1996, Vol. 44 No. 7: 1623-1633
    """
    # Generate sample of S-alpha-S random variable A and calc its square root
    agamma = cos(pi * alpha / 4) ** 2
    a = salphas(alpha=alpha, beta=1, gamma=agamma, size=size)

    # Generate Gaussian sample G1 and G2
    sigma = 2 * (gamma ** (1 / alpha))
    g1 = random.normal(0., sigma, size=size)
    g2 = random.normal(0., sigma, size=size)

    # Calculate the final sample
    x = (np.array(a, dtype=np.complex) ** 0.5) * (g1 + 1j * g2)
    return x
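A hypothetical call (salphas, the real-valued S-alpha-S sampler this depends on, is defined elsewhere in the project):

# 1000 complex samples with characteristic exponent 1.5 and dispersion 1.0
x = salphas_cplx(alpha=1.5, gamma=1.0, size=1000)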
Project: icnn    Author: locuslab    | Project source | File source
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('task', type=str,
                        choices=['InvertedPendulum', 'InvertedDoublePendulum',
                                 'Reacher', 'HalfCheetah', 'Swimmer', 'Hopper',
                                 'Walker2d', 'Ant', 'Humanoid', 'HumanoidStandup'],
                        help='(Every task is currently v1.)')
    parser.add_argument('--alg', type=str, choices=all_algs)
    parser.add_argument('--nSamples', type=int, default=50)
    parser.add_argument('--save', type=str)
    parser.add_argument('--overwrite', action='store_true')

    args = parser.parse_args()

    allDir = args.save or os.path.join('output.random-search', args.task)
    if os.path.exists(allDir):
        if args.overwrite:
            shutil.rmtree(allDir)
    os.makedirs(allDir, exist_ok=True)

    algs = [args.alg] if args.alg is not None else all_algs
    np.random.seed(0)
    for i in range(args.nSamples):
        l1size = npr.randint(100, 600)
        l2size = npr.randint(100, l1size)
        hp_alg = {
            'l1size': l1size,
            'l2size': l2size,
            'reward_k': 10.**npr.uniform(-4, 1),
            'l2norm': 10.**npr.uniform(-10, -2),
            'pl2norm': 10.**npr.uniform(-10, -2),
            'rate': 10.**npr.uniform(-4, -1),
            'prate': 10.**npr.uniform(-4, -1),
            'outheta': np.maximum(1e-8, npr.normal(loc=0.15, scale=0.1)),
            'ousigma': np.maximum(1e-8, npr.normal(loc=0.1, scale=0.05)),
            'lrelu': 10.**npr.uniform(-4, -1),
            'naf_bn': bool(npr.binomial(1, 0.5)),
            'icnn_bn': bool(npr.binomial(1, 0.25)),
        }
        if hp_alg['l2norm'] < 1e-8: hp_alg['l2norm'] = 0.
        if hp_alg['pl2norm'] < 1e-8: hp_alg['pl2norm'] = 0.
        if hp_alg['lrelu'] < 1e-3: hp_alg['lrelu'] = 0.

        for alg in algs:
            algDir = os.path.join(allDir, alg)
            runExp(args, alg, algDir, i, hp_alg)