Python scipy.stats module: uniform() code examples

The following 50 code examples, extracted from open-source Python projects, illustrate how to use scipy.stats.uniform().
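
Note the convention every snippet below relies on: scipy.stats.uniform(loc, scale) is the uniform distribution on the interval [loc, loc + scale], not [low, high]. A minimal, self-contained sketch of the frozen-distribution API:

from scipy.stats import uniform

# Frozen uniform distribution on [5, 15], i.e. loc=5, scale=10
dist = uniform(loc=5, scale=10)

print(dist.mean(), dist.var())           # 10.0 and 100/12 ≈ 8.33
print(dist.pdf(4.0), dist.pdf(6.0))      # 0.0 outside the support, 0.1 inside
print(dist.rvs(size=3, random_state=0))  # reproducible draws from [5, 15]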

Project: bolero    Author: rock-learning    | Project source | File source
def test_sample_contexts_from_distribution():
    env = Catapult(segments=[(0, 0), (20, 0)], context_interval=(0, 20),
                   context_distribution=uniform(5, 10), random_state=0)
    env.init()

    contexts = np.empty(1000)
    for i in range(contexts.shape[0]):
        context = env.request_context(None)
        contexts[i] = context[0]

    norm_dist = uniform(0.25, 0.5)
    assert_true(np.all(0.25 <= contexts))
    assert_true(np.all(contexts <= 0.75))
    mean, var = norm_dist.stats("mv")
    assert_almost_equal(np.mean(contexts), mean, places=1)
    assert_almost_equal(np.var(contexts), var, places=1)
Project: PyDREAM    Author: LoLab-VU    | Project source | File source
def likelihood(parameter_vector):

    parameter_vector = 10**np.array(parameter_vector)

    #Solve ODE system given parameter vector
    yout = odeint(odefunc, y0, tspan, args=(parameter_vector,))

    cout = yout[:, 2]

    #Calculate log probability contribution given simulated experimental values.

    logp_ctotal = np.sum(like_ctot.logpdf(cout))

    #If simulation failed due to integrator errors, return a log probability of -inf.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf

    return logp_ctotal


# Add vector of rate parameters to be sampled as unobserved random variables in DREAM with uniform priors.
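
The prior-construction step this comment refers to is omitted from the snippet. Below is a hedged sketch of what it typically looks like in PyDREAM, mirroring the SampledParam(uniform, ...) pattern from the multidmodel_uniform example further down this page; the bounds are illustrative placeholders, not the project's actual rate-parameter ranges.

from pydream.parameters import SampledParam
from scipy.stats import uniform
import numpy as np

# Hypothetical log10 rate-parameter bounds; substitute the model's real ranges.
lower_limits = np.array([-4.0, -4.0, -4.0])
upper_limits = np.array([4.0, 4.0, 4.0])
sampled_parameters = [SampledParam(uniform, loc=lower_limits,
                                   scale=upper_limits - lower_limits)]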
Project: elfi    Author: elfi-dev    | Project source | File source
def acquire(self, n, t=None):
        """Return random points from uniform distribution.

        Parameters
        ----------
        n : int
            Number of acquisition points to return.
        t : int, optional
            (unused)

        Returns
        -------
        x : np.ndarray
            The shape is (n, input_dim)

        """
        bounds = np.stack(self.model.bounds)
        return ss.uniform(bounds[:, 0], bounds[:, 1] - bounds[:, 0]) \
            .rvs(size=(n, self.model.input_dim), random_state=self.random_state)
Project: pyabc    Author: neuralyzer    | Project source | File source
def test_stop_acceptance_rate_too_low(db_path):
    set_acc_rate = 0.2

    def model(x):
        return {"par": x["par"] + sp.randn()}

    def dist(x, y):
        return abs(x["par"] - y["par"])

    abc = ABCSMC(model, Distribution(par=st.uniform(0, 10)), dist, 10)
    abc.new(db_path, {"par": .5})
    history = abc.run(-1, 8, min_acceptance_rate=set_acc_rate)
    df = history.get_all_populations()
    df["acceptance_rate"] = df["particles"] / df["samples"]
    assert df["acceptance_rate"].iloc[-1] < set_acc_rate
    assert df["acceptance_rate"].iloc[-2] >= set_acc_rate
Project: openml-pimp    Author: janvanrijn    | Project source | File source
def get_uniform_paramgrid(hyperparameters, fixed_parameters):
    param_grid = dict()
    for param_name, hyperparameter in hyperparameters.items():
        if fixed_parameters is not None and param_name in fixed_parameters.keys():
            continue
        if isinstance(hyperparameter, CategoricalHyperparameter):
            all_values = hyperparameter.choices
            if all(item in ['True', 'False'] for item in all_values):
                all_values = [bool(item) for item in all_values]
            param_grid[param_name] = all_values
        elif isinstance(hyperparameter, UniformFloatHyperparameter):
            if hyperparameter.log:
                param_grid[param_name] = loguniform(base=2, low=hyperparameter.lower, high=hyperparameter.upper)
            else:
                param_grid[param_name] = uniform(loc=hyperparameter.lower, scale=hyperparameter.upper-hyperparameter.lower)
        elif isinstance(hyperparameter, UniformIntegerHyperparameter):
            if hyperparameter.log:
                param_grid[param_name] = loguniform_int(base=2, low=hyperparameter.lower, high=hyperparameter.upper)
            else:
                param_grid[param_name] = randint(low=hyperparameter.lower, high=hyperparameter.upper+1)
        else:
            raise ValueError()
    return param_grid
Project: Parallel-SGD    Author: angadgill    | Project source | File source
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)

    # test that repeated calls yield identical parameters
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=3, random_state=0)
    assert_equal([x for x in sampler], [x for x in sampler])

    if sp_version >= (0, 16):
        param_distributions = {"C": uniform(0, 1)}
        sampler = ParameterSampler(param_distributions=param_distributions,
                                   n_iter=10, random_state=0)
        assert_equal([x for x in sampler], [x for x in sampler])
Project: crayimage    Author: yandexdataschool    | Project source | File source
def gen(self, N, trials, normal_p_range, anomaly_p_range, anomaly_scale = 1.0):
    self.N = N
    self.trials = trials

    self.gens = [
      compound_distribution(
        stats.uniform(loc=normal_p_range[0], scale=normal_p_range[1] - normal_p_range[0]),
        lambda a: stats.gamma(a = a, scale = 1.0)
      ),

      compound_distribution(
        stats.uniform(loc=anomaly_p_range[0], scale=anomaly_p_range[1] - anomaly_p_range[0]),
        lambda a: stats.gamma(a = a, scale = anomaly_scale)
      )
    ]

    self.priors = np.array([0.9, 0.1])

    self.cats, self.params, self.X = compound_rvs(self.gens, self.priors, self.N, self.trials)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformPDF(self):
    with self.test_session():
      a = constant_op.constant([-3.0] * 5 + [15.0])
      b = constant_op.constant([11.0] * 5 + [20.0])
      uniform = uniform_lib.Uniform(a=a, b=b)

      a_v = -3.0
      b_v = 11.0
      x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)

      def _expected_pdf():
        pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
        pdf[x > b_v] = 0.0
        pdf[x < a_v] = 0.0
        pdf[5] = 1.0 / (20.0 - 15.0)
        return pdf

      expected_pdf = _expected_pdf()

      pdf = uniform.prob(x)
      self.assertAllClose(expected_pdf, pdf.eval())

      log_pdf = uniform.log_prob(x)
      self.assertAllClose(np.log(expected_pdf), log_pdf.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformCDF(self):
    with self.test_session():
      batch_size = 6
      a = constant_op.constant([1.0] * batch_size)
      b = constant_op.constant([11.0] * batch_size)
      a_v = 1.0
      b_v = 11.0
      x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)

      uniform = uniform_lib.Uniform(a=a, b=b)

      def _expected_cdf():
        cdf = (x - a_v) / (b_v - a_v)
        cdf[x >= b_v] = 1
        cdf[x < a_v] = 0
        return cdf

      cdf = uniform.cdf(x)
      self.assertAllClose(_expected_cdf(), cdf.eval())

      log_cdf = uniform.log_cdf(x)
      self.assertAllClose(np.log(_expected_cdf()), log_cdf.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformSample(self):
    with self.test_session():
      a = constant_op.constant([3.0, 4.0])
      b = constant_op.constant(13.0)
      a1_v = 3.0
      a2_v = 4.0
      b_v = 13.0
      n = constant_op.constant(100000)
      uniform = uniform_lib.Uniform(a=a, b=b)

      samples = uniform.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(sample_values.shape, (100000, 2))
      self.assertAllClose(
          sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-2)
      self.assertAllClose(
          sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-2)
      self.assertFalse(
          np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
      self.assertFalse(
          np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformNans(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 100.0]
      uniform = uniform_lib.Uniform(a=a, b=b)

      no_nans = constant_op.constant(1.0)
      nans = constant_op.constant(0.0) / constant_op.constant(0.0)
      self.assertTrue(math_ops.is_nan(nans).eval())
      with_nans = array_ops.stack([no_nans, nans])

      pdf = uniform.prob(with_nans)

      is_nan = math_ops.is_nan(pdf).eval()
      self.assertFalse(is_nan[0])
      self.assertTrue(is_nan[1])
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformSampleWithShape(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 20.0]
      uniform = uniform_lib.Uniform(a, b)

      pdf = uniform.prob(uniform.sample((2, 3)))
      # pylint: disable=bad-continuation
      expected_pdf = [
          [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
          [[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
      ]
      # pylint: enable=bad-continuation
      self.assertAllClose(expected_pdf, pdf.eval())

      pdf = uniform.prob(uniform.sample())
      expected_pdf = [1.0, 0.1]
      self.assertAllClose(expected_pdf, pdf.eval())
Project: pyISC    Author: STREAM3    | Project source | File source
def test_outlier_detection(self):
        print "Start of test"
        n_samples = 1000
        norm_dist = stats.norm(0, 1)

        truth = np.ones((n_samples,))
        truth[-100:] = -1

        X0 = norm_dist.rvs(n_samples)
        X = np.c_[X0*5, X0+norm_dist.rvs(n_samples)*2]

        uniform_dist = stats.uniform(-10,10)

        X[-100:] = np.c_[uniform_dist.rvs(100),uniform_dist.rvs(100)]

        outlier_detector = pyisc.SklearnOutlierDetector(
            100.0/n_samples,
            pyisc.P_Gaussian([0,1])
        )

        outlier_detector.fit(X, np.array([1]*len(X)))


        self.assertLess(outlier_detector.threshold_, 0.35)
        self.assertGreater(outlier_detector.threshold_, 0.25)

        predictions = outlier_detector.predict(X, np.array([1]*len(X)))

        accuracy = sum(truth == predictions)/float(n_samples)

        print("accuracy", accuracy)
        self.assertGreater(accuracy, 0.85)
Project: skutil    Author: tgsmith61591    | Project source | File source
def test_large_grid():
        """In this test, we purposely overfit a RandomForest to completely random data
        in order to assert that the test error will far exceed the train error.
        """

        if not SK18:
            custom_cv = KFold(n=y_train.shape[0], n_folds=3, shuffle=True, random_state=42)
        else:
            custom_cv = KFold(n_splits=3, shuffle=True, random_state=42)

        # define the pipe
        pipe = Pipeline([
            ('scaler', SelectiveScaler()),
            ('pca', SelectivePCA(weight=True)),
            ('rf', RandomForestClassifier(random_state=42))
        ])

        # define hyper parameters
        hp = {
            'scaler__scaler': [StandardScaler(), RobustScaler(), MinMaxScaler()],
            'pca__whiten': [True, False],
            'pca__weight': [True, False],
            'pca__n_components': uniform(0.75, 0.15),
            'rf__n_estimators': randint(5, 10),
            'rf__max_depth': randint(5, 15)
        }

        # define the grid
        grid = RandomizedSearchCV(pipe, hp, n_iter=2, scoring='accuracy', n_jobs=1, cv=custom_cv, random_state=42)

        # this will fail because we haven't fit yet
        assert_fails(grid.score, (ValueError, AttributeError), X_train, y_train)

        # fit the grid
        grid.fit(X_train, y_train)

        # score for coverage -- this might warn...
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            grid.score(X_train, y_train)

        # coverage:
        assert grid._estimator_type == 'classifier'

        # get predictions
        tr_pred, te_pred = grid.predict(X_train), grid.predict(X_test)

        # evaluate score (SHOULD be better than random...)
        accuracy_score(y_train, tr_pred), accuracy_score(y_test, te_pred)

        # grid score reports:
        # assert fails for bad percentile
        assert_fails(report_grid_score_detail, ValueError, **{'random_search': grid, 'percentile': 0.0})
        assert_fails(report_grid_score_detail, ValueError, **{'random_search': grid, 'percentile': 1.0})

        # assert fails for bad y_axis
        assert_fails(report_grid_score_detail, ValueError, **{'random_search': grid, 'y_axis': 'bad_axis'})

        # assert passes otherwise
        report_grid_score_detail(grid, charts=True, percentile=0.95)  # just ensure percentile works
Project: brainiak    Author: brainiak    | Project source | File source
def test_simple_hpo():

    def f(args):
        x = args['x']
        return x*x

    s = {'x': {'dist': st.uniform(loc=-10., scale=20), 'lo': -10., 'hi': 10.}}
    trials = []

    # Test fmin and ability to continue adding to trials
    best = fmin(loss_fn=f, space=s, max_evals=40, trials=trials)
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)

    assert len(trials) == 50, "HPO continuation trials not working"

    # Test verbose flag
    best = fmin(loss_fn=f, space=s, max_evals=10, trials=trials)

    yarray = np.array([tr['loss'] for tr in trials])
    np.testing.assert_array_less(yarray, 100.)

    xarray = np.array([tr['x'] for tr in trials])
    np.testing.assert_array_less(np.abs(xarray), 10.)

    assert best['loss'] < 100., "HPO out of range"
    assert np.abs(best['x']) < 10., "HPO out of range"

    # Test unknown distributions
    s2 = {'x': {'dist': 'normal', 'mu': 0., 'sigma': 1.}}
    trials2 = []
    with pytest.raises(ValueError) as excinfo:
        fmin(loss_fn=f, space=s2, max_evals=40, trials=trials2)
    assert "Unknown distribution type for variable" in str(excinfo.value)

    s3 = {'x': {'dist': st.norm(loc=0., scale=1.)}}
    trials3 = []
    fmin(loss_fn=f, space=s3, max_evals=40, trials=trials3)
Project: dask-ml    Author: dask    | Project source | File source
def test_search_basic(xy_classification):
    X, y = xy_classification
    param_grid = {'class_weight': [None, 'balanced']}

    a = dms.GridSearchCV(SVC(kernel='rbf'), param_grid)
    a.fit(X, y)

    param_dist = {'C': stats.uniform}
    b = dms.RandomizedSearchCV(SVC(kernel='rbf'), param_dist)
    b.fit(X, y)
Project: PyDREAM    Author: LoLab-VU    | Project source | File source
def likelihood(parameter_vector):    

    param_dict = {pname: pvalue for pname, pvalue in zip(pysb_sampled_parameter_names, parameter_vector)}

    for pname, pvalue in param_dict.items():

        #Change model parameter values to current location in parameter space

        model.parameters[pname].value = 10**(pvalue)

    #Simulate experimentally measured Ctotal values.

    solver.run()

    #Calculate log probability contribution from simulated experimental values.

    logp_ctotal = np.sum(like_ctot.logpdf(solver.yobs['C_total']))

    #If model simulation failed due to integrator errors, return a log probability of -inf.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf

    return logp_ctotal


# Add vector of PySB rate parameters to be sampled as unobserved random variables to DREAM with uniform priors.
Project: PyDREAM    Author: LoLab-VU    | Project source | File source
def multidmodel_uniform():
    """Multidimensional model with uniform priors."""

    lower = np.array([-5, -9, 5, 3])
    upper = np.array([10, 0, 7, 8])
    width = upper - lower

    x = SampledParam(uniform, loc=lower, scale=width)
    like = simple_likelihood

    return [x], like
Project: elfi    Author: elfi-dev    | Project source | File source
def test_rvs_prior_ok(self):
        means = [0.8, 0.5]
        weights = [.3, .7]
        N = 10000
        prior_logpdf = ss.uniform(0, 1).logpdf
        rvs = GMDistribution.rvs(means, weights=weights, size=N, prior_logpdf=prior_logpdf)

        # Ensure prior pdf > 0 for all samples
        assert np.all(np.isfinite(prior_logpdf(rvs)))
Project: ottertune    Author: cmu-db    | Project source | File source
def gen_sample(loc, scale, sample, distribution_type):
    if distribution_type == NORMAL_DISTRIBUTION_TYPE:
        return norm(loc=loc, scale=scale).ppf(sample)
    elif distribution_type == UNIFORM_DISTRIBUTION_TYPE:
        return uniform(loc=loc, scale=scale).ppf(sample)
    else:
        raise Exception("Invalid distribution type: {}"
                        .format(distribution_type))
Project: RIDDLE    Author: jisungk    | Project source | File source
def rvs(self, random_state=None):
        if random_state is None:
            gen = uniform(loc=self.lo, scale=self.scale).rvs()
        else:
            gen = uniform(loc=self.lo, scale=self.scale).rvs(random_state=random_state)

        if self.mass_on_zero > 0.0 and np.random.uniform() < self.mass_on_zero:
            return 0.0

        return gen
Project: RIDDLE    Author: jisungk    | Project source | File source
def rvs(self, random_state=None):
        if random_state is None:
            exp = uniform(loc=self.lo, scale=self.scale).rvs()
        else:
            exp = uniform(loc=self.lo, scale=self.scale).rvs(random_state=random_state)

        if self.mass_on_zero > 0.0 and np.random.uniform() < self.mass_on_zero:
            return 0.0

        return self.base ** exp
Project: cgpm    Author: probcomp    | Project source | File source
def __init__(self, outputs=None, inputs=None, noise=None, rng=None):
        if rng is None:
            rng = gu.gen_rng(1)
        if outputs is None:
            outputs = [0]
        if inputs is None:
            inputs = [1]
        if noise is None:
            noise = .1
        self.rng = rng
        self.outputs = outputs
        self.inputs = inputs
        self.noise = noise
        self.uniform = uniform(loc=-self.noise, scale=2*self.noise)
Project: cgpm    Author: probcomp    | Project source | File source
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
        assert targets == self.outputs
        assert inputs.keys() == self.inputs
        assert not constraints
        x = inputs[self.inputs[0]]
        u = self.rng.rand()
        noise = self.rng.uniform(low=-self.noise, high=self.noise)
        if u < .5:
            y = x**2 + noise
        else:
            y = -(x**2 + noise)
        return {self.outputs[0]: y}
Project: cgpm    Author: probcomp    | Project source | File source
def logpdf(self, rowid, targets, constraints=None, inputs=None):
        assert targets.keys() == self.outputs
        assert inputs.keys() == self.inputs
        assert not constraints
        x = inputs[self.inputs[0]]
        y = targets[self.outputs[0]]
        return logsumexp([
            np.log(.5)+self.uniform.logpdf(y-x**2),
            np.log(.5)+self.uniform.logpdf(-y-x**2)
        ])
Project: cgpm    Author: probcomp    | Project source | File source
def __init__(self, outputs=None, inputs=None, noise=None, rng=None):
        if rng is None:
            rng = gu.gen_rng(1)
        if outputs is None:
            outputs = [0]
        if inputs is None:
            inputs = [1]
        if noise is None:
            noise = .1
        self.rng = rng
        self.outputs = outputs
        self.inputs = inputs
        self.noise = noise
        self.uniform = uniform(scale=self.noise)
Project: cgpm    Author: probcomp    | Project source | File source
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
        assert targets == self.outputs
        assert inputs.keys() == self.inputs
        assert not constraints
        x = inputs[self.inputs[0]]
        noise = self.rng.uniform(high=self.noise)
        if np.cos(x) < 0:
            y = np.cos(x) + noise
        else:
            y = np.cos(x) - noise
        return {self.outputs[0]: y}
Project: cgpm    Author: probcomp    | Project source | File source
def __init__(self, outputs=None, inputs=None, low=0, high=1, rng=None):
        assert not inputs
        if rng is None:
            rng = gu.gen_rng(0)
        if outputs is None:
            outputs = [0]
        self.rng = rng
        self.low = low
        self.high = high
        self.outputs = outputs
        self.inputs = []
        self.uniform = uniform(loc=self.low, scale=self.high-self.low)
Project: cgpm    Author: probcomp    | Project source | File source
def simulate(self, rowid, targets, constraints=None, inputs=None, N=None):
        assert not constraints
        assert targets == self.outputs
        x = self.rng.uniform(low=self.low, high=self.high)
        return {self.outputs[0]: x}
Project: cgpm    Author: probcomp    | Project source | File source
def logpdf(self, rowid, targets, constraints=None, inputs=None):
        assert not constraints
        assert not inputs
        assert targets.keys() == self.outputs
        x = targets[self.outputs[0]]
        return self.uniform.logpdf(x)
Project: scrap    Author: BruceJohnJennerLawso    | Project source | File source
def __init__(self, data, mleDiffCutoff=1.0):
        print([min(data), max(data)])

        distributions = [st.laplace, st.norm, st.expon, st.dweibull, st.invweibull, st.lognorm, st.uniform]
        mles = []

        for distribution in distributions:
            pars = distribution.fit(data)
            mle = distribution.nnlf(pars, data)
            mles.append(mle)

        results = [(distribution.name, mle) for distribution, mle in zip(distributions, mles)]

        for dist in sorted(zip(distributions, mles), key=lambda d: d[1]):
            print(dist)
        best_fit = sorted(zip(distributions, mles), key=lambda d: d[1])[0]
        print('Best fit reached using {}, MLE value: {}'.format(best_fit[0].name, best_fit[1]))

        self.modelSets = []

        self.modelOptions = [mod[0].name for mod in sorted(zip(distributions, mles), key=lambda d: d[1])]
        ## list of scipy distribution ids sorted by their MLEs given the data
        ## [0] is best, [1], next best and so on


        for model in sorted(zip(distributions, mles), key=lambda d: d[1]):
            if(model[0].name in getAvailableDistributionsByScipyIds()):
                try:
                    modelDist = getDistributionByScipyId(model[0].name, data)
                    self.modelSets.append([modelDist, model[1]])
                    ## append the distribution object and the MLE value for this
                    ## particular distribution & the data

                    ## ah frig, I think in the bimodal case, it will be
                    ## something like 
                except RuntimeError:
                    pass    
            else:
                ## nothing that can be done here, if we dont have a object of
                ## the distribution needed available, we cant do much about it
                pass
Project: alf-python    Author: gbrammer    | Project source | File source
def get_default_priors(param_names, limits=PRIOR_LIMITS):
        from scipy.stats import uniform
        from collections import OrderedDict

        prior = OrderedDict()
        for p in param_names:
            if p in limits:
                lim = limits[p]
                prior[p] = uniform(loc=lim[0], scale=lim[1]-lim[0])
            else:
                prior[p] = uniform(loc=-1.e10, scale=2.e10)

        return prior
Project: ngsphy    Author: merlyescalona    | Project source | File source
def value(self,samples=1):
        """
        Samples number of values given from the specific distribution.
        ------------------------------------------------------------------------
        - samples: number of values that will be returned.
        """
        value=0
        try:
            for item in self.__params:
                if item==0: break
            if item==0: value=[0]*samples
            else:
                if self.__name=="b": value=self.binom(samples)
                if self.__name=="e": value=self.exponential(samples)
                if self.__name=="f": value=self.fixed(samples)
                if self.__name=="g": value=self.gamma(samples)
                if self.__name=="g1": value=self.gamma1(samples)
                if self.__name=="ln": value=self.lognormal(samples)
                if self.__name=="n": value=self.normal(samples)
                if self.__name=="nb": value=self.nbinom(samples)
                if self.__name=="p": value=self.poisson(samples)
                if self.__name=="u": value=self.uniform(samples)
        except Exception as ex:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            message="\n\tUnexpected: {0} | {1} - File: {2} - Line:{3}".format(\
                ex,exc_type, fname, exc_tb.tb_lineno)
            status=False
            raise Exception(message)
            # self.appLogger.error(message)
            # sys.exit()

        return value
Project: ngsphy    Author: merlyescalona    | Project source | File source
def uniform(self,samples):
        """
        Sampling from a uniform distribution.
        Parameters:
        minimum, maximum (lower and upper bounds of the interval)
        ------------------------------------------------------------------------
        - samples: number of values that will be returned.
        """
        minParam = float(self.__params[0])
        maxParam = float(self.__params[1])
        f = np.random.uniform(low=minParam, high=maxParam, size=samples)
        return f
Project: pygcam    Author: JGCRI    | Project source | File source
def uniformMinMax(min, max):
    return uniform(loc=min, scale=(max - min))
Project: Parallel-SGD    Author: angadgill    | Project source | File source
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)
Project: crayimage    Author: yandexdataschool    | Project source | File source
def gen(self, normal_mu_range, anomaly_mu_range):
    self.gens = [
      compound_distribution(
        stats.uniform(loc=anomaly_mu_range[0], scale=anomaly_mu_range[1] - anomaly_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      ),

      compound_distribution(
        stats.uniform(loc=normal_mu_range[0], scale=normal_mu_range[1] - normal_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      )
    ]

    self.priors = np.array([0.1, 0.9])

    n = 10
    MC = CameraMC(self.priors, self.gens, image_shape=(1, n, n), n_frames=100)

    self.cats, self.params, self.imgs = MC.get_sample()
    self.hists = ndcount(self.imgs).reshape(n, n, -1)
    self.hists = self.hists.astype('float32') / np.sum(self.hists, axis=2)[:, :, None]
    self.cats = self.cats.reshape(-1)

    print("Img shape %s" % (self.imgs.shape, ))
    print("Hists shape %s" % (self.hists.shape, ))
    print("Categories shape %s" % (self.cats.shape, ))
Project: crayimage    Author: yandexdataschool    | Project source | File source
def gen(self, normal_mu_range, anomaly_mu_range):
    self.gens = [
      compound_distribution(
        stats.uniform(loc=anomaly_mu_range[0], scale=anomaly_mu_range[1] - anomaly_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      ),

      compound_distribution(
        stats.uniform(loc=normal_mu_range[0], scale=normal_mu_range[1] - normal_mu_range[0]),
        truncated(stats.poisson, max_value=1024)
      )
    ]

    self.priors = np.array([0.1, 0.9])

    n = 100
    m = 10
    bins = 64
    MC = CameraMC(self.priors, self.gens, image_shape=(1, n, ), n_frames=100, max_value=bins)

    X = np.ndarray(shape=(m, n, bins), dtype='float32')
    cats = np.ndarray(shape=(m, n), dtype='float32')

    for i in range(m):
      cats[i], _, imgs = MC.get_sample()
      h = ndcount(imgs, bins=bins)
      print(h.shape)
      h = h.reshape(n, bins)

      X[i] = h.astype('float32') / np.sum(h, axis=1)[:, None]

    print("X shape %s" % (X.shape, ))
    print("Categories shape %s" % (cats.shape, ))

    self.X = X
    self.cats = cats
Project: crayimage    Author: yandexdataschool    | Project source | File source
def test_separable(self):
    bins = 10
    frames = 100

    comp1 = compound_distribution(
      parameter_distribution=stats.uniform(0.0, 0.25),
      signal_family=lambda p: stats.binom(bins - 1, p),
      binarize_signal=False, bins = bins
    )

    comp2 = compound_distribution(
      parameter_distribution=stats.uniform(0.5, 1.0),
      signal_family=lambda p: stats.binom(bins - 1, p),
      binarize_signal=False, bins=bins
    )

    grid1 = np.linspace(0.0, 0.25, num=200)
    grid2 = np.linspace(0.5, 1.0, num=200)

    prior1, prior2 = 0.5, 0.5

    gen = CompoundMC(
      category_priors=[prior1, prior2],
      compounds=[comp1, comp2],
      n_pixels=100, n_frames=frames
    )

    cats, params, X = gen.rvs(size=1)

    clf = FastBayesianClassifier(
      priors=[prior1, prior2],
      compounds=[comp1, comp2],
      parameter_grids=[grid1, grid2]
    )

    y = clf.predict_proba(X)

    print(np.sum(np.argmax(cats, axis=1) != y))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformRange(self):
    with self.test_session():
      a = 3.0
      b = 10.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      self.assertAllClose(a, uniform.a.eval())
      self.assertAllClose(b, uniform.b.eval())
      self.assertAllClose(b - a, uniform.range().eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformShape(self):
    with self.test_session():
      a = constant_op.constant([-3.0] * 5)
      b = constant_op.constant(11.0)
      uniform = uniform_lib.Uniform(a=a, b=b)

      self.assertEqual(uniform.batch_shape().eval(), (5,))
      self.assertEqual(uniform.get_batch_shape(), tensor_shape.TensorShape([5]))
      self.assertAllEqual(uniform.event_shape().eval(), [])
      self.assertEqual(uniform.get_event_shape(), tensor_shape.TensorShape([]))
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformPDFWithScalarEndpoint(self):
    with self.test_session():
      a = constant_op.constant([0.0, 5.0])
      b = constant_op.constant(10.0)
      uniform = uniform_lib.Uniform(a=a, b=b)

      x = np.array([0.0, 8.0], dtype=np.float32)
      expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])

      pdf = uniform.prob(x)
      self.assertAllClose(expected_pdf, pdf.eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformAssertMaxGtMin(self):
    with self.test_session():
      a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
      b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
      uniform = uniform_lib.Uniform(a=a_v, b=b_v, validate_args=True)

      with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
                                               "x < y"):
        uniform.a.eval()
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def _testUniformSampleMultiDimensional(self):
    # DISABLED: Please enable this test once b/issues/30149644 is resolved.
    with self.test_session():
      batch_size = 2
      a_v = [3.0, 22.0]
      b_v = [13.0, 35.0]
      a = constant_op.constant([a_v] * batch_size)
      b = constant_op.constant([b_v] * batch_size)

      uniform = uniform_lib.Uniform(a=a, b=b)

      n_v = 100000
      n = constant_op.constant(n_v)
      samples = uniform.sample(n)
      self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))

      sample_values = samples.eval()

      self.assertFalse(
          np.any(sample_values[:, 0, 0] < a_v[0]) or
          np.any(sample_values[:, 0, 0] >= b_v[0]))
      self.assertFalse(
          np.any(sample_values[:, 0, 1] < a_v[1]) or
          np.any(sample_values[:, 0, 1] >= b_v[1]))

      self.assertAllClose(
          sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
      self.assertAllClose(
          sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformMean(self):
    with self.test_session():
      a = 10.0
      b = 100.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      s_uniform = stats.uniform(loc=a, scale=b - a)
      self.assertAllClose(uniform.mean().eval(), s_uniform.mean())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformVariance(self):
    with self.test_session():
      a = 10.0
      b = 100.0
      uniform = uniform_lib.Uniform(a=a, b=b)
      s_uniform = stats.uniform(loc=a, scale=b - a)
      self.assertAllClose(uniform.variance().eval(), s_uniform.var())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformSamplePdf(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 100.0]
      uniform = uniform_lib.Uniform(a, b)
      self.assertTrue(
          math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0).eval())
Project: DeepLearning_VirtualReality_BigData_Project    Author: rashmitripathi    | Project source | File source
def testUniformBroadcasting(self):
    with self.test_session():
      a = 10.0
      b = [11.0, 20.0]
      uniform = uniform_lib.Uniform(a, b)

      pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
      expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
      self.assertAllClose(expected_pdf, pdf.eval())
Project: astrobase    Author: waqasbhatti    | Project source | File source
def generate_rrab_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period':sps.uniform(loc=0.45,scale=0.35),
            'fourierorder':[8,11],
            'amplitude':sps.uniform(loc=0.4,scale=0.5),
            'phioffset':np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake RRab light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model params, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.

    '''

    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'RRab'
    return modeldict
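
A hedged usage sketch for the generator above, assuming only its signature and docstring; the time base below is a hypothetical stand-in:

import numpy as np
import scipy.stats as sps

times = np.arange(0.0, 100.0, 0.02)  # hypothetical observation times (days)

# Override the default priors using the same frozen-distribution convention:
rrab = generate_rrab_lightcurve(
    times,
    paramdists={'period': sps.uniform(loc=0.5, scale=0.2),  # periods in [0.5, 0.7] d
                'fourierorder': [8, 11],
                'amplitude': sps.uniform(loc=0.4, scale=0.5),
                'phioffset': np.pi},
)
print(rrab['vartype'])  # 'RRab'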
Project: astrobase    Author: waqasbhatti    | Project source | File source
def generate_rrc_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period':sps.uniform(loc=0.10,scale=0.30),
            'fourierorder':[2,3],
            'amplitude':sps.uniform(loc=0.1,scale=0.3),
            'phioffset':1.5*np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake RRc light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is None,
    np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    model params, in order:

    {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:

    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.

    '''

    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'RRc'
    return modeldict