Python numpy.random module: exponential() example source code

We have extracted the following 12 code examples from open-source Python projects to illustrate how to use numpy.random.exponential().

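Before the project examples, here is a minimal standalone sketch of the basic call (the variable names and parameter values are illustrative only): exponential(scale, size) draws samples whose mean equals scale, i.e. scale is 1/rate.

import numpy as np

# scale is the mean of the distribution, i.e. 1/rate
samples = np.random.exponential(scale=2.0, size=100000)
print(samples.mean())   # close to 2.0
print(samples.std())    # also close to 2.0: mean and std are equal for an exponential
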
Project: EndemicPy    Author: j-i-l    | Project source | File source
def __init__(self, scale, pre=10):
        """
        This class holds a queue of times drawn from an exponential 
            distribution with a specified scale.

        Arguments:

            - scale: The scale parameter for the exponential distribution.
            - pre: Predefined size of the queue. Default=10
        """
        self.scale = scale
        self.pre = pre
        self.queue = SimpleQueue(maxsize=pre + 1)
        self.v_put = vectorize(self.queue.put_nowait)
        # the exponential distribution is not defined for a rate of 0,
        # so if the rate is 0 (i.e. scale is None) huge times are set instead
        if self.scale in [None, 0]:
            self.scale = 0
            self.draw_fct = no_mut
        else:
            self.draw_fct = random.exponential
        #fillup the queue
        self.fillup()
        # new version, compatible with pickling; see method below
        self.v_get = vectorize(self.get_val)
Project: epsilon_free_inference    Author: gpapamak    | Project source | File source
def sim_steps(self, num_steps):
        """Simulates the process with the gillespie algorithm for a specified number of steps."""

        times = [self.time]
        states = [self.state.copy()]

        for _ in xrange(num_steps):

            rates = self.params * self._calc_propensities()
            total_rate = rates.sum()

            if total_rate == 0:
                self.time = float('inf')
                break

            self.time += rng.exponential(scale=1/total_rate)

            reaction = helper.discrete_sample(rates / total_rate)[0]
            self._do_reaction(reaction)

            times.append(self.time)
            states.append(self.state.copy())

        return times, np.array(states)
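The waiting time drawn above uses a standard property underlying the Gillespie algorithm: for competing Poisson processes with rates r_i, the time to the first event is exponentially distributed with rate sum(r_i), hence exponential(scale=1/total_rate). A minimal numerical check of that property, with illustrative rate values:

import numpy as np

rates = np.array([0.5, 1.0, 2.5])   # per-reaction rates
n = 100000

# waiting time of each reaction, drawn independently; the first event is the minimum
waits = np.random.exponential(scale=1.0 / rates, size=(n, rates.size))
first_event = waits.min(axis=1)

print(first_event.mean())   # close to 1 / rates.sum() = 0.25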
Project: mandos    Author: carolinetomo    | Project source | File source
def sim_occurrences(tree,r):
    occurrences = {}
    for i in tree.iternodes():
        if i.istip:
            cur_length = i.length
            occurrences[i.label] = []
            i.old_length = i.length
            while (cur_length > 0):
                cur_length -= exponential(r)
                cur_time = i.height + cur_length
                if cur_length > 0:
                    occurrences[i.label].append(cur_time)
            if len(occurrences[i.label]) == 0:
                occurrences[i.label].append(i.height)
            elif i.height == 0.:
                occurrences[i.label].append(i.height)
            i.length = i.old_length
    return occurrences
Project: Ultras-Sound-Nerve-Segmentation---Kaggle    Author: Simoncarbo    | Project source | File source
def transform(image): #translate, shear, stretch, flips?
    rows,cols = image.shape

    angle = random.uniform(-1.5,1.5)
    center = (rows / 2 - 0.5+random.uniform(-50,50), cols / 2 - 0.5+random.uniform(-50,50))
    def_image = tf.rotate(image, angle = angle, center = center,clip = True, preserve_range = True,order = 5)

    alpha = random.uniform(0,5)
    sigma = random.exponential(scale = 5)+2+alpha**2
    def_image = elastic_transform(def_image, alpha, sigma)

    def_image = def_image[10:-10,10:-10]

    return def_image

# sigma: variance of the filter; controls the homogeneity of the transformation
#    (close to zero: random, large: translation-like)
Project: bayes-qnet    Author: casutton    | Project source | File source
def test_exponential_sample_parameters (self):
        sampling.set_seed(3242)
        for rep in range(10):
            mu = random.exponential(1.0)
            f = distributions.Exponential (mu)
            x = f.sample(10000)            
            params = [ f.sample_parameters(x)[0] for i in xrange(10000) ] 
            self.assertTrue (abs(mu - numpy.mean(params)) < 0.03, "Mismatch: MU %s params %s" % (mu, numpy.mean(params)))
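The test draws the true scale mu itself from exponential(1.0) and then checks that the fitted parameters recover it. A simplified stand-in using numpy only (distributions.Exponential and sampling are project-specific; here the estimator is just the sample mean, which is the maximum-likelihood estimate of the scale):

import numpy as np

np.random.seed(3242)
mu = np.random.exponential(1.0)             # random true scale, as in the test
x = np.random.exponential(mu, size=10000)   # data drawn with that scale

print(mu, x.mean())   # the sample mean should agree with mu to within a few percent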
Project: EndemicPy    Author: j-i-l    | Project source | File source
def get_val(self, a=None):
        """
        Function returning a value drawn from the exponential distribution.
        """
        try:
            return self.queue.get_nowait()
        except Empty:
            self.fillup()
            return self.queue.get_nowait()

    #def v_get(self, an_array):
    #    #return map(self.get_val, xrange(an_array.size))
    #    return apply_along_axis(self.get_val, 0, an_array)
    # to transform the priority queue holding the upcoming events into a pickle-able list
Project: epsilon_free_inference    Author: gpapamak    | Project source | File source
def sim_time(self, dt, duration, max_n_steps=float('inf')):
        """Simulates the process with the gillespie algorithm for a specified time duration."""

        num_rec = int(duration / dt) + 1
        states = np.zeros([num_rec, self.state.size])
        cur_time = self.time
        n_steps = 0

        for i in xrange(num_rec):

            while cur_time > self.time:

                rates = self.params * self._calc_propensities()
                total_rate = rates.sum()

                if total_rate == 0:
                    self.time = float('inf')
                    break

                self.time += rng.exponential(scale=1/total_rate)

                reaction = helper.discrete_sample(rates / total_rate)[0]
                self._do_reaction(reaction)

                n_steps += 1
                if n_steps > max_n_steps:
                    raise SimTooLongException(max_n_steps)

            states[i] = self.state.copy()
            cur_time += dt

        return np.array(states)
Project: pynamd    Author: radakb    | Project source | File source
def __init__(self, nstates=2, max_samples=10, klow=1.0e-1, khi=1.0e1,
                 randseed=None, sample_fudge=0.0, unsampled_states=0):
        self.max_samples = int(max_samples)
        self.nstates = int(nstates)
        # Randomize the HO parameters.
        seed(randseed)
        klow, khi = float(klow), float(khi)
        #spacing = uniform(self.nstates, size=self.nstates)
        #k = klow*(khi / klow)**(spacing / self.nstates)
        # k = uniform(float(klow), float(khi), size=self.nstates)
        k = klow + (khi - klow)*exponential(1.0, self.nstates)
        sigma = sqrt(1/k)
        x0 = uniform(-0.5*sigma.max(), 0.5*sigma.max(), size=self.nstates)
        # Choose which states to sample from.
        nsampled_states = self.nstates - int(unsampled_states)
        sampled_indices = choice(arange(self.nstates), nsampled_states, False)
        sampled_indices.sort()
        # Generate samples up to max.
        x_in = normal(0.0, 1.0, (nsampled_states, self.max_samples))
        x_in *= sigma[sampled_indices, newaxis]
        x_in += x0[sampled_indices, newaxis]
        self.data_size = zeros(self.nstates, int32) 
        self.data_size[sampled_indices] += self.max_samples
        # Randomly remove samples for uneven sampling.  Note that at least one
        # state must remain the same, otherwise max_samples is incorrect.
        # Also, we don't actually have to do anything to the actual samples, because
        # the sample size is used as a mask!
        #
        del_max = int(sample_fudge*self.max_samples + 0.5) + 1
        if del_max > 1:
            sample_shift = randint(0, del_max, nsampled_states)
            if all(sample_shift > 0): # Randomly reset the shift for a state.
                sample_shift[choice(arange(nsampled_states))] = 0
            self.data_size[sampled_indices] -= sample_shift
        self.unsampled_indices = where(self.data_size == 0)[0]
        # Compute the energy in all states
        u_ijn = 0.5*(k[:, newaxis]*(x_in[:, newaxis, :] - x0[:, newaxis])**2)
        self.u_ijn = u_ijn
        self.f_actual = 0.5*log(k / k[0])[1:]
        self.x0 = x0
        self.x_jn = x_in
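Note the call exponential(1.0, self.nstates): the second positional argument of numpy.random.exponential is size, so this draws nstates independent values with scale 1.0. A quick illustration of the equivalent keyword form (array size is arbitrary):

import numpy as np

k_pos = np.random.exponential(1.0, 5)               # positional: scale=1.0, size=5
k_kw = np.random.exponential(scale=1.0, size=5)     # equivalent keyword call
print(k_pos.shape, k_kw.shape)                       # (5,) (5,)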
Project: product-taz    Author: TheAnomalieZ    | Project source | File source
def sample(self, n_samples):
        return npr.exponential(scale=self.mean, size=n_samples)
Project: product-taz    Author: TheAnomalieZ    | Project source | File source
def sample(self, n_samples):
        return npr.exponential(scale=self.mean, size=n_samples)
Project: EndemicPy    Author: j-i-l    | Project source | File source
def __init__(self, n=None, method='stub', **distribution):
        """
            Possible arguments for the distribution are:
            - network_type: specify the type of network that should be constructed (THIS IS MANDATORY).
                It can either be the name of a distribution or of a certain network type.

            ['l_partition', 'poisson', 'normal', 'binomial', 'exponential', 'geometric', 'gamma', 'power', 'weibull']
            For specific parameters of the distributions, see:
                http://docs.scipy.org/doc/numpy/reference/routines.random.html

            - method: The probabilistic framework after which the network will be constructed.
            - distribution specific arguments. Check out the description of the specific numpy
                function. Or just give the argument network_type and look at what the error tells you.

           see self._create_graph for more information
        """
        _Graph.__init__(self)
        self.is_static = True
        self._rewiring_attempts = 100000
        self._stub_attempts = 100000
        self.permitted_types = allowed_dists + ["l_partition", 'full']
        self.is_directed = False
        # to do: pass useful info in here.
        self._info = {}
        #for now only undirected networks
        if n is not None:
            self.n = n
            if method in ['proba', 'stub']:
                self.method = method
            else:
                raise ValueError(method + ' is not a permitted method! Choose either "proba" or "stub"')
            try:
                self.nw_name = distribution.pop('network_type')
                empty_graph = False
            except KeyError:
                self.nn = []
                self._convert_to_array()
                empty_graph = True
                #create an empty graph if network_type is not given
            if not empty_graph:
                if self.nw_name not in self.permitted_types:
                    raise ValueError(
                        "The specified network type \"%s\" is not permitted. \
                        Please choose from " % self.nw_name + '[' + ', '.join(self.permitted_types) + ']')
                self.distribution = distribution
                self._create_graph(**self.distribution)
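The network_type names map onto numpy.random distributions that are used to draw node degrees. The snippet below is only a hypothetical sketch of how an 'exponential' degree sequence might be drawn via the documented numpy call; it is not EndemicPy's actual _create_graph implementation, and the scale value is an assumption:

import numpy as np

n = 1000
# hypothetical: draw an exponential degree sequence and round to integers >= 1;
# make the total degree even so a stub-matching construction can pair every stub
degrees = np.random.exponential(scale=4.0, size=n).astype(int) + 1
if degrees.sum() % 2 == 1:
    degrees[0] += 1
print(degrees.mean())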
Project: particle    Author: qrqiuren    | Project source | File source
def salphas(alpha, gamma, beta=0., size=None):
    """
    Generate random variables under S-alpha-S distribution.

    Please check the reference paper for further details on algorithms and
    symbols.

    Parameters
    ----------
    alpha : float
        Alpha coefficient (characteristic exponent) of S-alpha-S distribution.
    gamma : float
        Gamma coefficient (dispersion parameter) of S-alpha-S distribution.
    beta : float
        Beta coefficient (skewness parameter) of alpha stable distribution. By
        default, this value will be 0 as the definition of S-alpha-S
        distribution. But it allows configuration to generate samples in a
        broader situation.
    size : tuple of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
        samples are drawn. If not indicated, a single value will be returned.

    Returns
    -------
    a : float
        A real number or a real matrix with size `size` which is the sample of
        the distribution.

    Reference
    ---------
    Tsakalides, P., and Nikias C. L., "The robust covariation-based MUSIC
    (ROC-MUSIC) algorithm for bearing estimation in impulsive noise
    environments", IEEE Transactions on Signal Processing,
    Jul. 1996, Vol. 44 No. 7: 1623-1633
    """
    # Draw random vars
    pi2 = pi / 2
    W = random.exponential(scale=1., size=size)
    U = random.uniform(low=-pi2, high=pi2, size=size)

    # Sampling with params alpha and beta
    if alpha == 1:
        p2bu = pi2 + beta * U
        S = (p2bu * tan(U) - beta * log(pi2 * W * cos(U) / p2bu)) / pi2
    else:
        U0 = -pi2 * beta * (1 - abs(1 - alpha)) / alpha
        auu0 = alpha * (U - U0)
        D = (cos(arctan(beta * tan(pi2 * alpha)))) ** (1 / alpha)
        E = sin(auu0) / ((cos(U)) ** (1 / alpha))
        F = (cos(U - auu0) / W) ** ((1 - alpha) / alpha)
        S = D * E * F

    # Making gamma efficient
    a = gamma ** (1 / alpha) * S
    return a
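A usage sketch for salphas, assuming the function and its numpy imports (random, pi, tan, sin, cos, arctan, log) are already in scope; the parameter values are arbitrary:

import numpy as np

samples = salphas(alpha=1.5, gamma=1.0, size=10000)

# symmetric alpha-stable samples are heavy-tailed but centred at zero,
# so the median is a more robust location check than the mean
print(np.median(samples))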