Python numpy.random module: seed() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.random.seed().
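For example, seeding NumPy's global random number generator makes every subsequent draw reproducible. A minimal sketch:

import numpy as np

np.random.seed(42)           # fix the global RNG state
a = np.random.randn(3)

np.random.seed(42)           # re-seeding replays the identical stream
b = np.random.randn(3)

assert (a == b).all()        # the draws match bit-for-bit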

Project: iCount | Author: tomazc
def make_fasta_file(sequences=None, headers=None, out_file=None, num_sequences=10, seq_len=80,
                    rnd_seed=None):
    """Make artificial FASTA file."""
    random.seed(rnd_seed)  # pylint:disable=no-member
    if sequences is None and headers is None:
        headers = ['{}'.format(i + 1) for i in range(num_sequences)]
        random_seeds = random.randint(10**5, size=num_sequences)  # pylint:disable=no-member
        sequences = [make_sequence(seq_len, rnd_seed=rnd) for rnd in random_seeds]
    elif sequences is None:
        random_seeds = random.randint(10**5, size=len(headers))  # pylint:disable=no-member
        sequences = [make_sequence(seq_len, rnd_seed=rnd) for rnd in random_seeds]
    elif headers is None:
        headers = ['{}'.format(i + 1) for i in range(len(sequences))]

    if out_file is None:
        out_file = get_temp_file_name(extension='fasta')
    with open(out_file, 'wt') as ofile:
        for header, seq in zip(headers, sequences):
            ofile.write('>' + header + '\n')
            ofile.write(seq + '\n')

    return os.path.abspath(out_file)
Project: catalyst | Author: enigmampc
def test_returns(self, seed_value, window_length):

        returns = Returns(window_length=window_length)

        today = datetime64(1, 'ns')
        assets = arange(3)
        out = empty((3,), dtype=float)

        seed(seed_value)  # Seed so we get deterministic results.
        test_data = abs(randn(window_length, 3))

        # Calculate the expected returns
        expected = (test_data[-1] - test_data[0]) / test_data[0]

        out = empty((3,), dtype=float)
        returns.compute(today, assets, out, test_data)

        check_allclose(expected, out)
Project: drmad | Author: bigaidream-projects
def test_hess_vector_prod():
    npr.seed(1)
    randv = npr.randn(10)
    def fun(x):
        return np.sin(np.dot(x, randv))
    df = grad(fun)
    def vector_product(x, v):
        return np.sin(np.dot(v, df(x)))
    ddf = grad(vector_product)
    A = npr.randn(10)
    B = npr.randn(10)
    check_grads(fun, A)
    check_grads(vector_product, A, B)

# TODO:
# Grad three or more, wrt different args
# Diamond patterns
# Taking grad again after returning const
# Empty functions
# 2nd derivatives with fanout, thinking about the outgrad adder
Project: deep-makeover | Author: david-gpu
def _setup_tensorflow():
    # Create session
    config = tf.ConfigProto(log_device_placement=False) #, intra_op_parallelism_threads=1)
    sess   = tf.Session(config=config)

    # Initialize all RNGs with a deterministic seed
    with sess.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)

    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    return sess


# TBD: Move to dm_train.py?
Project: block | Author: bamos
def test_np():
    npr.seed(0)

    nx, nineq, neq = 4, 6, 7
    Q = npr.randn(nx, nx)
    G = npr.randn(nineq, nx)
    A = npr.randn(neq, nx)
    D = np.diag(npr.rand(nineq))

    K_ = np.bmat((
        (Q, np.zeros((nx, nineq)), G.T, A.T),
        (np.zeros((nineq, nx)), D, np.eye(nineq), np.zeros((nineq, neq))),
        (G, np.eye(nineq), np.zeros((nineq, nineq + neq))),
        (A, np.zeros((neq, nineq + nineq + neq)))
    ))

    K = block((
        (Q,   0, G.T, A.T),
        (0,   D, 'I',   0),
        (G, 'I',   0,   0),
        (A,   0,   0,   0)
    ))

    assert np.allclose(K_, K)
Project: Generative-Adversarial-Network | Author: K-Du
def setup_tensorflow():
    # Create session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_fraction)
    config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement, gpu_options=gpu_options)
    sess = tf.Session(config=config)

    # Initialize rng with a deterministic seed
    with sess.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)

    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    return sess, summary_writer
Project: ToolsLibrary | Author: albertmenglongli
from contextlib import contextmanager

@contextmanager
def random_context(random_module, seed):
    assert random_module in ('random', 'numpy.random')

    if random_module == 'random':
        import random as _random
        get_state_method_str = 'getstate'
        set_state_method_str = 'setstate'
    elif random_module == 'numpy.random':
        from numpy import random as _random
        get_state_method_str = 'get_state'
        set_state_method_str = 'set_state'
    else:
        raise Exception('Unsupported random module: {random_module}'.format(random_module=random_module))

    old_state = getattr(_random, get_state_method_str)()
    try:
        _random.seed(seed)
        yield _random
    finally:
        getattr(_random, set_state_method_str)(old_state)
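A minimal usage sketch for the context manager above (the seed value is arbitrary): draws inside the with block are deterministic, and the module's previous RNG state is restored on exit.

with random_context('numpy.random', seed=42) as rng:
    sample = rng.random(3)   # deterministic under seed 42
# numpy.random's prior state is restored here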
Project: sGLMM | Author: YeWenting
def setXY(self, X, y):
        self.X = X
        self.y = y
        row = X.shape[1]
        col = y.shape[1]
        #random.seed(6)
        #self.beta = random.random(size=(row, col))
        #self.beta = 2 * self.beta - 1
        # self.beta=np.loadtxt('../toyData/lmm_tree_beta.csv', delimiter=',') # p.loadtxt('../toyData/group_beta.csv', delimiter=',')
        self.beta=np.zeros((row,col))
        # L1, L2 = np.linalg.eigh(X.T.dot(X))
        # L1 = L1.max()
        #if self.maxEigen is None:
        s= np.linalg.svd(self.X, full_matrices=False)[1]
        L1 = np.max(s)
        L1 = L1*L1
        #else:
            #L1 = self.maxEigen
        self.L = L1
Project: qpth | Author: locuslab
def get_grads(nBatch=1, nz=10, neq=1, nineq=3, Qscale=1.,
              Gscale=1., hscale=1., Ascale=1., bscale=1.):
    assert(nBatch == 1)
    npr.seed(1)
    L = np.random.randn(nz, nz)
    Q = Qscale * L.dot(L.T)
    G = Gscale * npr.randn(nineq, nz)
    # h = hscale*npr.randn(nineq)
    z0 = npr.randn(nz)
    s0 = npr.rand(nineq)
    h = G.dot(z0) + s0
    A = Ascale * npr.randn(neq, nz)
    # b = bscale*npr.randn(neq)
    b = A.dot(z0)

    p = npr.randn(nBatch, nz)
    # print(np.linalg.norm(p))
    truez = npr.randn(nBatch, nz)

    Q, p, G, h, A, b, truez = [x.astype(np.float64) for x in
                               [Q, p, G, h, A, b, truez]]
    _, zhat, nu, lam, slacks = qp_cvxpy.forward_single_np(Q, p[0], G, h, A, b)

    grads = get_grads_torch(Q, p, G, h, A, b, truez)
    return [p[0], Q, G, h, A, b, truez], grads
Project: pytest-randomly | Author: pytest-dev
def pytest_addoption(parser):
    group = parser.getgroup('randomly', 'Randomizes tests')
    group._addoption(
        '--randomly-seed', action='store', dest='randomly_seed',
        default=int(time.time()), type=int,
        help="""Set the seed that pytest-randomly uses. Default behaviour:
                use time.time()"""
    )
    group._addoption(
        '--randomly-dont-reset-seed', action='store_false',
        dest='randomly_reset_seed', default=True,
        help="""Stop pytest-randomly from resetting random.seed() at the
                start of every test context (e.g. TestCase) and individual
                test."""
    )
    group._addoption(
        '--randomly-dont-reorganize', action='store_false',
        dest='randomly_reorganize', default=True,
        help="Stop pytest-randomly from randomly reorganizing the test order."
    )
Project: pytest-randomly | Author: pytest-dev
def _reseed(config, offset=0):
    seed = config.getoption('randomly_seed') + offset
    if seed not in random_states:
        random.seed(seed)
        random_states[seed] = random.getstate()
    else:
        random.setstate(random_states[seed])

    if have_factory_boy:
        factory_set_random_state(random_states[seed])

    if have_faker:
        faker_random.setstate(random_states[seed])

    if have_numpy:
        if seed not in np_random_states:
            np_random.seed(seed)
            np_random_states[seed] = np_random.get_state()
        else:
            np_random.set_state(np_random_states[seed])
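The caching above relies on get_state()/set_state() round-tripping the generator exactly. A minimal sketch of that guarantee:

import numpy.random as np_random

np_random.seed(0)
state = np_random.get_state()
a = np_random.randn(3)
np_random.set_state(state)   # rewind the generator to the saved state
b = np_random.randn(3)
assert (a == b).all()        # the stream replays identically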
Project: lhsmdu | Author: sahilm89
def sample(numDimensions, numSamples, scalingFactor=scalingFactor, numToAverage=numToAverage, randomSeed=randomSeed):
    ''' Main LHS-MDU sampling function '''
    random.seed(randomSeed) ## Seeding the random number generator.

    ### Number of realizations (I) = Number of samples(L) x scale for oversampling (M)
    numRealizations = scalingFactor*numSamples ## Number of realizations (I)
    ### Creating NxI realization matrix
    matrixOfRealizations =  createRandomStandardUniformMatrix(numDimensions, numRealizations)

    ### Finding distances between column vectors of the matrix to create a distance matrix.
    distance_1D = findUpperTriangularColumnDistanceVector(matrixOfRealizations, numRealizations)

    ## Eliminating columns from the realization matrix, using the distance measure to get a strata
    ## matrix with number of columns equal to the number of samples required.

    global matrixOfStrata
    matrixOfStrata = eliminateRealizationsToStrata(distance_1D, matrixOfRealizations, numSamples)

    matrixOfSamples = resample() 

    return matrixOfSamples
Project: product-taz | Author: TheAnomalieZ
def test_backward_pass():
    npr.seed(1)

    N   = 10
    D   = 3

    alpha = Hyperparameter(
        initial_value = 2*np.ones(D),
        prior         = priors.Lognormal(1.5),
        name          = 'alpha'
    )

    beta = Hyperparameter(
        initial_value = 0.5*np.ones(D),
        prior         = priors.Lognormal(1.5),
        name          = 'beta'
    )

    bw = BetaWarp(D, alpha=alpha, beta=beta)

    data = 0.5*np.ones(D)
    v    = npr.randn(D)

    bw.forward_pass(data)
    assert np.all(bw.backward_pass(v) == 0.53033008588991071*v)
Project: product-taz | Author: TheAnomalieZ
def test_backward_pass():
    npr.seed(1)

    N   = 10
    D   = 3

    alpha = Hyperparameter(
        initial_value = 2*np.ones(D),
        prior         = priors.Lognormal(1.5),
        name          = 'alpha'
    )

    beta = Hyperparameter(
        initial_value = 0.5*np.ones(D),
        prior         = priors.Lognormal(1.5),
        name          = 'beta'
    )

    bw = KumarWarp(D, alpha=alpha, beta=beta)

    data = 0.5*np.ones(D)
    v    = npr.randn(D)

    bw.forward_pass(data)
    assert np.all(bw.backward_pass(v) == 0.5773502691896257*v)
Project: product-taz | Author: TheAnomalieZ
def test_fit():
    npr.seed(1)

    N = 10
    D = 5

    gp = GP(D, burnin=5)

    inputs  = npr.rand(N,D)
    pending = npr.rand(3,D)
    W       = npr.randn(D,1)
    vals    = inputs.dot(W).flatten() + np.sqrt(1e-3)*npr.randn(N)

    gp.fit(inputs, vals, pending)

    assert gp.chain_length == 15
    assert all([np.all(p.value != p.initial_value) for p in gp.params.values()])
    assert len(gp._cache_list) == 10
    assert len(gp._hypers_list) == 10
    assert len(gp._fantasy_values_list) == 10
Project: thunder-factorization | Author: thunder-project
def test_svd(eng):
    x = make_low_rank_matrix(n_samples=10, n_features=5, random_state=0)
    x = fromarray(x, engine=eng)

    from sklearn.utils.extmath import randomized_svd
    u1, s1, v1 = randomized_svd(x.toarray(), n_components=2,  random_state=0)

    u2, s2, v2 = SVD(k=2, method='direct').fit(x)
    assert allclose_sign(u1, u2)
    assert allclose(s1, s2)
    assert allclose_sign(v1.T, v2.T)

    u2, s2, v2 = SVD(k=2, method='em', max_iter=100, seed=0).fit(x)
    tol = 1e-1
    assert allclose_sign(u1, u2, atol=tol)
    assert allclose(s1, s2, atol=tol)
    assert allclose_sign(v1.T, v2.T, atol=tol)
Project: thunder-factorization | Author: thunder-project
def test_pca(eng):
    x = make_low_rank_matrix(n_samples=10, n_features=5, random_state=0)
    x = fromarray(x, engine=eng)

    from sklearn.decomposition import PCA as skPCA
    pca = skPCA(n_components=2)
    t1 = pca.fit_transform(x.toarray())
    w1_T = pca.components_

    t2, w2_T = PCA(k=2, svd_method='direct').fit(x)
    assert allclose_sign(w1_T.T, w2_T.T)
    assert allclose_sign(t1, t2)

    t2, w2_T = PCA(k=2, svd_method='em', max_iter=100, seed=0).fit(x)
    tol = 1e-1
    assert allclose_sign(w1_T.T, w2_T.T, atol=tol)
    assert allclose_sign(t1, t2, atol=tol)
Project: thunder-factorization | Author: thunder-project
def test_ica(eng):
    t = linspace(0, 10, 100)
    s1 = sin(t)
    s2 = square(sin(2*t))
    x = c_[s1, s2, s1+s2]
    random.seed(0)
    x += 0.001*random.randn(*x.shape)
    x = fromarray(x, engine=eng)

    def normalize_ICA(s, aT):
        a = aT.T
        c = a.sum(axis=0)
        return s*c, (a/c).T

    from sklearn.decomposition import FastICA
    ica = FastICA(n_components=2, fun='cube', random_state=0)
    s1 = ica.fit_transform(x.toarray())
    aT1 = ica.mixing_.T
    s1, aT1 = normalize_ICA(s1, aT1)

    s2, aT2 = ICA(k=2, svd_method='direct', max_iter=200, seed=0).fit(x)
    s2, aT2 = normalize_ICA(s2, aT2)
    tol=1e-1
    assert allclose_sign_permute(s1, s2, atol=tol)
    assert allclose_sign_permute(aT1, aT2, atol=tol)
Project: thunder-factorization | Author: thunder-project
def test_nmf(eng):

    t = linspace(0, 10, 100)
    s1 = 1 + absolute(sin(t))
    s2 = 1 + square(cos(2*t))

    h = c_[s1, s2].T
    w = array([[1, 0], [0, 1], [1, 1]])
    x = dot(w, h)
    x = fromarray(x, engine=eng)

    from sklearn.decomposition import NMF as skNMF
    nmf = skNMF(n_components=2, random_state=0)
    w1 = nmf.fit_transform(x.toarray())
    h1 = nmf.components_
    xhat1 = dot(w1, h1)

    w2, h2 = NMF(k=2, seed=0).fit(x)
    xhat2 = dot(w2, h2)

    tol=1e-1
    assert allclose(xhat1, xhat2, atol=tol)
Project: rca-evaluation | Author: sieve-microservices
def _kshape(x, k, initial_clustering=None):
    """
    >>> from numpy.random import seed; seed(0)
    >>> _kshape(np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 2)
    (array([0, 0, 1, 0]), array([[-1.2244258 , -0.35015476,  0.52411628,  1.05046429],
           [-0.8660254 ,  0.8660254 , -0.8660254 ,  0.8660254 ]]))
    """
    m = x.shape[0]

    if initial_clustering is not None:
        assert len(initial_clustering) == m, "Initial assignment does not match column length"
        idx = initial_clustering
    else:
        idx = randint(0, k, size=m)

    print(idx)

    centroids = np.zeros((k,x.shape[1]))
    distances = np.empty((m, k))

    for _ in range(100):
        old_idx = idx
        for j in range(k):
            centroids[j] = _extract_shape(idx, x, j, centroids[j])

        for i in range(m):
            for j in range(k):
                distances[i, j] = 1 - max(_ncc_c(x[i], centroids[j]))
        idx = distances.argmin(1)
        if np.array_equal(old_idx, idx):
            break

    print(idx)

    return idx, centroids
Project: sgcrfpy | Author: dswah
def test_theta_0():
    rng.seed(0)
    n_samples = 100
    Y = rng.randn(n_samples, 5)
    X = rng.randn(n_samples, 5)

    sgcrf = SparseGaussianCRF(lamL=0.01, lamT=0.01)
    sgcrf.fit(X, Y)

    assert np.allclose(sgcrf.Lam, np.eye(5), .1, .2)
Project: zipline-chinese | Author: zhanghan1990
def test_rsi(self, seed_value, expected):

        rsi = RSI()

        today = datetime64(1, 'ns')
        assets = arange(3)
        out = empty((3,), dtype=float)

        seed(seed_value)  # Seed so we get deterministic results.
        test_data = abs(randn(15, 3))

        out = empty((3,), dtype=float)
        rsi.compute(today, assets, out, test_data)

        check_allclose(expected, out)
Project: zipline-chinese | Author: zhanghan1990
def test_returns(self, seed_value, window_length):

        returns = Returns(window_length=window_length)

        today = datetime64(1, 'ns')
        assets = arange(3)
        out = empty((3,), dtype=float)

        seed(seed_value)  # Seed so we get deterministic results.
        test_data = abs(randn(window_length, 3))

        # Calculate the expected returns
        expected = (test_data[-1] - test_data[0]) / test_data[0]

        out = empty((3,), dtype=float)
        returns.compute(today, assets, out, test_data)

        check_allclose(expected, out)
Project: zipline-chinese | Author: zhanghan1990
def test_masked_rankdata_2d(self,
                                seed_value,
                                method,
                                use_mask,
                                set_missing,
                                ascending):
        eyemask = ~eye(5, dtype=bool)
        nomask = ones((5, 5), dtype=bool)

        seed(seed_value)
        asfloat = (randn(5, 5) * seed_value)
        asdatetime = (asfloat).copy().view('datetime64[ns]')

        mask = eyemask if use_mask else nomask
        if set_missing:
            asfloat[:, 2] = nan
            asdatetime[:, 2] = NaTns

        float_result = masked_rankdata_2d(
            data=asfloat,
            mask=mask,
            missing_value=nan,
            method=method,
            ascending=True,
        )
        datetime_result = masked_rankdata_2d(
            data=asdatetime,
            mask=mask,
            missing_value=NaTns,
            method=method,
            ascending=True,
        )

        check_arrays(float_result, datetime_result)
Project: cellranger | Author: 10XGenomics
def split(args):
    seed(1)

    if not args.clonotype_assignments:
        return {'chunks':[{'chunk_clonotypes':[]}]}

    # Distribute clonotypes to chunks. The input file might be sorted by frequency
    # so make sure you shuffle the clonotypes in order to better distribute load.
    with open(args.clonotype_assignments) as f:
        clonotypes = json.load(f)
    num_clonotypes = len(clonotypes)

    clonotypes_per_chunk = max(MIN_CLONOTYPES_PER_CHUNK, np.ceil(float(num_clonotypes) / MAX_CHUNKS))
    num_chunks = int(np.ceil(float(num_clonotypes) / clonotypes_per_chunk))

    chunk_clonotypes = [[] for _ in range(num_chunks)]

    chunks = []

    if num_clonotypes > 0:
        # Pick a chunk in 0..num_chunks-1 once per clonotype.
        chunk_assignments = choice(np.arange(num_chunks), num_clonotypes, replace=True)
        for idx, clonotype_id in enumerate(clonotypes.iterkeys()):
            chunk_clonotypes[chunk_assignments[idx]].append(clonotype_id)

        chunks = [{'chunk_clonotypes':c, '__mem_gb': 12.0} for c in chunk_clonotypes if len(c) > 0]

    if len(chunks) == 0:
        chunks = [{'chunk_clonotypes':[]}]

    return {'chunks': chunks, 'join': {'__mem_gb': 16.0}}
Project: tensorflow-srgan | Author: olgaliak
def setup_tensorflow():
    # Create session
    config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=config)

    # Initialize rng with a deterministic seed
    with sess.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)

    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    return sess, summary_writer
Project: iCount | Author: tomazc
def make_sequence(size, include_n=False, rnd_seed=None):
    """Make random DNA segment of length `size`."""
    random.seed(rnd_seed)  # pylint:disable=no-member
    if include_n:
        bases = ['A', 'C', 'G', 'T', 'N']
    else:
        bases = ['A', 'C', 'G', 'T']
    return ''.join(random.choice(bases, size))  # pylint: disable=no-member
Project: iCount | Author: tomazc
def make_quality_scores(size, min_chr=33, max_chr=74, rnd_seed=None):
    """Make random DNA segment of length `size`."""
    random.seed(rnd_seed)  # pylint:disable=no-member
    scores = [chr(i) for i in range(min_chr, max_chr + 1)]
    return ''.join(random.choice(scores, size))  # pylint: disable=no-member
Project: neuroblast | Author: ActiveState
def seed_random_number_generator():
    random.seed(1)
Project: catalyst | Author: enigmampc
def test_rsi(self, seed_value, expected):

        rsi = RSI()

        today = datetime64(1, 'ns')
        assets = arange(3)
        out = empty((3,), dtype=float)

        seed(seed_value)  # Seed so we get deterministic results.
        test_data = abs(randn(15, 3))

        out = empty((3,), dtype=float)
        rsi.compute(today, assets, out, test_data)

        check_allclose(expected, out)
Project: catalyst | Author: enigmampc
def test_masked_rankdata_2d(self,
                                seed_value,
                                method,
                                use_mask,
                                set_missing,
                                ascending):
        eyemask = ~eye(5, dtype=bool)
        nomask = ones((5, 5), dtype=bool)

        seed(seed_value)
        asfloat = (randn(5, 5) * seed_value)
        asdatetime = (asfloat).copy().view('datetime64[ns]')

        mask = eyemask if use_mask else nomask
        if set_missing:
            asfloat[:, 2] = nan
            asdatetime[:, 2] = NaTns

        float_result = masked_rankdata_2d(
            data=asfloat,
            mask=mask,
            missing_value=nan,
            method=method,
            ascending=True,
        )
        datetime_result = masked_rankdata_2d(
            data=asdatetime,
            mask=mask,
            missing_value=NaTns,
            method=method,
            ascending=True,
        )

        check_arrays(float_result, datetime_result)
Project: bid2charge | Author: soton-agents
def old_generate_marginal_price_vector_from_seed(self,seed,units):
        if seed:
            nprnd.seed(seed)

        remaining = units

        length = int(nprnd.random() * min(4, remaining + 1))

        mmin = 0
        mmax = 1.0
        v1 = [round(elem, 2) for elem in (mmax - mmin) * nprnd.random(length) + mmin]

        remaining -= length
        length = int(nprnd.random() * min(4, remaining + 1))

        mmin = 0
        mmax = 2.0
        v2 = [round(elem, 2) for elem in (mmax - mmin) * nprnd.random(length) + mmin]

        remaining -= length

        mmin = 0.5
        mmax = 3.0
        v3 = [round(elem, 2) for elem in (mmax - mmin) * nprnd.random(remaining) + mmin]

        return v1 + v2 + v3
Project: bid2charge | Author: soton-agents
def generate_marginal_price_vector_from_seed(self,seed,units,old_style=False):

        if seed:
            if old_style:
                nprnd.seed(int(seed))
            else:
                # numpy seeds must lie in [0, 2**32 - 1]; fold larger values into range
                nprnd.seed(int(seed % 4294967295))


        prices = []

        min_start = 0
        min_increase = 0.2

        max_start = 1
        max_increase = 0.4

        for i in range(units):
            min_p = min_start + i * min_increase
            max_p = max_start + i * max_increase
            r = min_p + nprnd.random() * (max_p - min_p)
            r = round(r,PRICE_ROUNDING)

            prices.append(r)

        return prices
Project: bid2charge | Author: soton-agents
def generate_marginal_price_vector_pk(self,units_available,user_pk,user_current_day,old_style=False):
        seed = (user_pk * RNDM_PK_MULTIPLIER + user_current_day) * RNDM_MARGINAL_VALUES_SEED
        return self.generate_marginal_price_vector_from_seed(seed, units_available,old_style)
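The composite seed built here can exceed numpy's 32-bit seed range, which is why generate_marginal_price_vector_from_seed folds it with a modulo. A minimal sketch with hypothetical multipliers (the real RNDM_* constants are not shown in this extract):

import numpy.random as nprnd

user_pk, user_current_day = 12345, 7                  # hypothetical inputs
seed = (user_pk * 10**9 + user_current_day) * 10**6   # easily exceeds 2**32 - 1
nprnd.seed(int(seed % 4294967295))   # np.random.seed rejects values outside [0, 2**32 - 1]
price_draw = nprnd.random()          # same (user, day) -> same prices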
Project: optnet | Author: locuslab
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--minBps', type=int, default=1)
    parser.add_argument('--maxBps', type=int, default=10)
    parser.add_argument('--seqLen', type=int, default=100)
    parser.add_argument('--minHeight', type=int, default=10)
    parser.add_argument('--maxHeight', type=int, default=100)
    parser.add_argument('--noise', type=float, default=10)
    parser.add_argument('--nSamples', type=int, default=10000)
    parser.add_argument('--save', type=str, default='data/synthetic')
    args = parser.parse_args()

    npr.seed(0)

    save = args.save
    if os.path.isdir(save):
        shutil.rmtree(save)
    os.makedirs(save)

    X, Y = [], []
    for i in range(args.nSamples):
        Xi, Yi = sample(args)
        X.append(Xi); Y.append(Yi)
        if i == 0:
            fig, ax = plt.subplots(1, 1)
            plt.plot(Xi, label='Corrupted')
            plt.plot(Yi, label='Original')
            plt.legend()
            f = os.path.join(args.save, "example.png")
            fig.savefig(f)
            print("Created {}".format(f))

    X = np.array(X)
    Y = np.array(Y)

    for loc,arr in (('features.pt', X), ('labels.pt', Y)):
        fname = os.path.join(args.save, loc)
        with open(fname, 'wb') as f:
            torch.save(torch.Tensor(arr), f)
        print("Created {}".format(fname))
Project: optnet | Author: locuslab
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--boardSz', type=int, default=2)
    parser.add_argument('--nSamples', type=int, default=10000)
    parser.add_argument('--data', type=str, default='data')
    args = parser.parse_args()

    npr.seed(0)

    save = os.path.join(args.data, str(args.boardSz))
    if os.path.isdir(save):
        shutil.rmtree(save)
    os.makedirs(save)

    X = []
    Y = []
    for i in tqdm(range(args.nSamples)):
        Xi, Yi = sample(args)
        X.append(Xi)
        Y.append(Yi)

    X = np.array(X)
    Y = np.array(Y)

    for loc,arr in (('features.pt', X), ('labels.pt', Y)):
        fname = os.path.join(save, loc)
        with open(fname, 'wb') as f:
            torch.save(torch.Tensor(arr), f)
        print("Created {}".format(fname))
Project: optnet | Author: locuslab
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--nTrials', type=int, default=5)
    parser.add_argument('--nBatch', type=int, default=128)
    args = parser.parse_args()

    npr.seed(0)

    print('==== CPU ===\n')
    prof(args, False)

    print('\n\n==== GPU ===\n')
    prof(args, True)
Project: optnet | Author: locuslab
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--nTrials', type=int, default=5)
    parser.add_argument('--nBatch', type=int, default=128)
    args = parser.parse_args()

    npr.seed(0)

    # print('==== CPU ===\n')
    # prof(args, False)

    print('\n\n==== GPU ===\n')
    prof(args, True)
Project: Neural-Chatbot | Author: saurabhmathur96
def __init__(self, questions, answers, vocabulary, batch_size, sequence_length, one_hot_target, stream=False):
        random.seed(0)
        self.sequence_length = sequence_length
        self.vocabulary = vocabulary
        self.batch_size = batch_size
        self.one_hot_target = one_hot_target
        self.stream = stream

        self.questions = questions
        self.answers = answers
        self.inverse_vocabulary = dict((word, i) for i, word in enumerate(self.vocabulary))
Project: pynamd | Author: radakb
def __init__(self, nstates=2, max_samples=10, klow=1.0e-1, khi=1.0e1,
                 randseed=None, sample_fudge=0.0, unsampled_states=0):
        self.max_samples = int(max_samples)
        self.nstates = int(nstates)
        # Randomize the HO parameters.
        seed(randseed)
        klow, khi = float(klow), float(khi)
        #spacing = uniform(self.nstates, size=self.nstates)
        #k = klow*(khi / klow)**(spacing / self.nstates)
        # k = uniform(float(klow), float(khi), size=self.nstates)
        k = klow + (khi - klow)*exponential(1.0, self.nstates)
        sigma = sqrt(1/k)
        x0 = uniform(-0.5*sigma.max(), 0.5*sigma.max(), size=self.nstates)
        # Choose which states to sample from.
        nsampled_states = self.nstates - int(unsampled_states)
        sampled_indices = choice(arange(self.nstates), nsampled_states, False)
        sampled_indices.sort()
        # Generate samples up to max.
        x_in = normal(0.0, 1.0, (nsampled_states, self.max_samples))
        x_in *= sigma[sampled_indices, newaxis]
        x_in += x0[sampled_indices, newaxis]
        self.data_size = zeros(self.nstates, int32) 
        self.data_size[sampled_indices] += self.max_samples
        # Randomly remove samples for uneven sampling.  Note that at least one
        # state must remain the same, otherwise max_samples is incorrect.
        # Also, we don't actually have to do anything to the actual samples, bc
        # the sample size is used as a mask!
        #
        del_max = int(sample_fudge*self.max_samples + 0.5) + 1
        if del_max > 1:
            sample_shift = randint(0, del_max, nsampled_states)
            if all(sample_shift > 0): # Randomly reset the shift for a state.
                sample_shift[choice(arange(nsampled_states))] = 0
            self.data_size[sampled_indices] -= sample_shift
        self.unsampled_indices = where(self.data_size == 0)[0]
        # Compute the energy in all states
        u_ijn = 0.5*(k[:, newaxis]*(x_in[:, newaxis, :] - x0[:, newaxis])**2)
        self.u_ijn = u_ijn
        self.f_actual = 0.5*log(k / k[0])[1:]
        self.x0 = x0
        self.x_jn = x_in
Project: Herobraine | Author: MadcowD
def __init__(self,action_dimension,mu=0, theta=0.15, sigma=.1):
        self.action_dimension = action_dimension
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        self.state = np.ones(self.action_dimension) * self.mu
        self.reset()
        nr.seed(0)
Project: piradar | Author: scivision
def create_pseudo_random_code(clen=10000,rseed=0,verbose=False):
    """
    Create waveform files for hfradar
    Juha Vierinen
    """
    Npt = 200  # number of points to plot, just for plotting, arbitrary
    """
    seed is a way of reproducing the random code without having to store all actual codes.
    the seed can then act as a sort of station_id.
    """
    seed(rseed)

    """
    generate a uniform random phase modulated (complex) signal 'sig'.
    It's single precision floating point for SDR, since DAC is typically <= 16 bits!
    """
    sig = np.exp(1j*2.0*np.pi*random(clen)).astype('complex64')

    if stuffr is not None:
        stuffr.plot_cts(sig[:Npt])

    if verbose and hist is not None:
        fg,ax = subplots(3,1)
        sca(ax[0])
        hist(sig.real)#,50)
        sca(ax[1])
        hist(sig.imag)

        #hist(random(clen))

    return sig
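As the comments note, the seed doubles as a station identifier: regenerating a station's waveform only requires its seed, not the stored samples. A minimal sketch of that idea:

import numpy as np
from numpy.random import seed, random

def station_code(station_id, clen=16):
    seed(station_id)    # the station id reproduces the station's code
    return np.exp(1j * 2.0 * np.pi * random(clen)).astype('complex64')

assert np.allclose(station_code(3), station_code(3))       # reproducible
assert not np.allclose(station_code(3), station_code(4))   # distinct per station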
Project: coremltools | Author: apple
def test_int_features_in_pipeline(self): 

        import numpy.random as rn
        import pandas as pd
        rn.seed(0)

        x_train_dict = [dict((rn.randint(100), 1) for i in range(20))
                        for j in range(100)]
        y_train = [0,1]*50

        from sklearn.pipeline import Pipeline
        from sklearn.feature_extraction import DictVectorizer
        from sklearn.linear_model import LogisticRegression

        pl = Pipeline([("dv", DictVectorizer()),  ("lm", LogisticRegression())])
        pl.fit(x_train_dict, y_train)

        import coremltools

        model = coremltools.converters.sklearn.convert(pl, input_features = "features", output_feature_names = "target")

        x = pd.DataFrame( {"features" : x_train_dict, 
                           "prediction" : pl.predict(x_train_dict)})

        cur_eval_metics = evaluate_classifier(model, x)
        self.assertEquals(cur_eval_metics['num_errors'], 0)
Project: coremltools | Author: apple
def test_conversion_boston(self):

        from sklearn.datasets import load_boston

        scikit_data = load_boston()

        sh = scikit_data.data.shape 

        rn.seed(0)
        missing_value_indices = [(rn.randint(sh[0]), rn.randint(sh[1])) 
                                    for k in range(sh[0])]

        for strategy in ["mean", "median", "most_frequent"]: 
            for missing_value in [0, 'NaN', -999]:

                X = np.array(scikit_data.data).copy()

                for i, j in missing_value_indices:
                    X[i,j] = missing_value

                model = Imputer(missing_values = missing_value, strategy = strategy)
                model = model.fit(X)

                tr_X = model.transform(X.copy())

                spec = converter.convert(model, scikit_data.feature_names, 'out')

                input_data = [dict(zip(scikit_data.feature_names, row)) 
                                for row in X]

                output_data = [{"out" : row} for row in tr_X]

                result = evaluate_transformer(spec, input_data, output_data)

                assert result["num_errors"] == 0
Project: DeepTrade | Author: happynoom
def _create_weights(self):
        with tf.variable_scope("weights"):
            self.weights = {
                'out': tf.get_variable("weights", [self.hidden_size, self.nclasses],
                                       initializer=tf.random_normal_initializer(mean=0, stddev=0.01, seed=1))
            }
            self.biases = {
                'out': tf.get_variable("bias", [self.nclasses], initializer=tf.random_normal_initializer(mean=0, stddev=0.01, seed=1))
            }
Project: block | Author: bamos
def test_linear_operator():
    npr.seed(0)

    nx, nineq, neq = 4, 6, 7
    Q = npr.randn(nx, nx)
    G = npr.randn(nineq, nx)
    A = npr.randn(neq, nx)
    D = np.diag(npr.rand(nineq))

    K_ = np.bmat((
        (Q, np.zeros((nx, nineq)), G.T, A.T),
        (np.zeros((nineq, nx)), D, np.eye(nineq), np.zeros((nineq, neq))),
        (G, np.eye(nineq), np.zeros((nineq, nineq + neq))),
        (A, np.zeros((neq, nineq + nineq + neq)))
    ))

    Q_lo = sla.aslinearoperator(Q)
    G_lo = sla.aslinearoperator(G)
    A_lo = sla.aslinearoperator(A)
    D_lo = sla.aslinearoperator(D)

    K = block((
        (Q_lo,    0,    G.T,    A.T),
        (0,    D_lo,    'I',      0),
        (G_lo,  'I',      0,      0),
        (A_lo,    0,      0,      0)
    ), arrtype=sla.LinearOperator)

    w1 = np.random.randn(K_.shape[1])
    assert np.allclose(K_.dot(w1), K.dot(w1))
    w2 = np.random.randn(K_.shape[0])
    assert np.allclose(K_.T.dot(w2), K.H.dot(w2))
    W = np.random.randn(*K_.shape)
    assert np.allclose(K_.dot(W), K.dot(W))
Project: qpth | Author: locuslab
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--nTrials', type=int, default=10)
    args = parser.parse_args()
    setproctitle.setproctitle('bamos.optnet.prof-linear')

    npr.seed(0)

    prof(args)
Project: qpth | Author: locuslab
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--nTrials', type=int, default=10)
    args = parser.parse_args()
    setproctitle.setproctitle('bamos.optnet.prof-gurobi')

    npr.seed(0)

    prof(args)
Project: DeepNeuralNet-QSAR | Author: Merck
def init_random(seed):
        import numpy.random as random
        random.seed(seed)
Project: redmapper | Author: erykoff
def runTest(self):
        file_path = 'data_for_tests'
        conf_filename = 'testconfig.yaml'
        confstr = Configuration(file_path + '/' + conf_filename)

        mask = HPMask(confstr) #Create the mask

        #set all the necessary inputs from test file
        mask.maskgals.exptime = 100.
        mask.maskgals.limmag  = 20.
        mask.maskgals.zp[0]   = 22.5
        mask.maskgals.nsig[0] = 10.
        #necessary as mask.maskgals.exptime has shape (6000,)
        mag_in                = np.full(6000, 1, dtype = float)
        mag_in[:6]            = np.array([16., 17., 18., 19., 20., 21.])

        #test without noise
        mag, mag_err = apply_errormodels(mask.maskgals, mag_in, nonoise = True)
        idx = np.array([0, 1, 2, 3, 4, 5])
        mag_idl     = np.array([16., 17., 18., 19., 20., 21.])
        mag_err_idl = np.array([0.00602535, 0.0107989, 0.0212915, 0.0463765, 0.108574, 0.264390])
        testing.assert_almost_equal(mag[idx], mag_idl)
        testing.assert_almost_equal(mag_err[idx], mag_err_idl, decimal = 6)

        #test with noise and set seed
        seed = 0
        random.seed(seed = seed)
        mag, mag_err = apply_errormodels(mask.maskgals, mag_in)

        idx = np.array([0, 1, 2, 3, 4, 5, 1257, 2333, 3876])
        mag_test = np.array([15.98942267, 16.99568733, 17.97935868, 
                             18.90075284, 19.81409659, 21.29508236,  
                             0.99999373,  1.00000663,  1.00000807])
        mag_err_test = np.array([5.96693051e-03, 1.07560575e-02, 2.08905241e-02, 
                                 4.23251692e-02, 9.14877522e-02, 3.46958444e-01,
                                 5.44154045e-06, 5.44160510e-06, 5.44161230e-06])
        testing.assert_almost_equal(mag[idx], mag_test)
        testing.assert_almost_equal(mag_err[idx], mag_err_test)