Python numpy.random module: random() example source code

We extracted the following 49 code examples from open-source Python projects to illustrate how to use numpy.random.random().
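
Note: as a quick reference before the project excerpts, random() with no argument returns a single float drawn uniformly from [0.0, 1.0), and passing a size yields an array. A minimal sketch using the legacy numpy.random interface that all the examples below rely on:

from numpy.random import random, seed

seed(42)            # fix the seed so the draws are reproducible
x = random()        # one float in [0.0, 1.0)
v = random(5)       # array of 5 floats
m = random((2, 3))  # 2x3 array
print(x, v.shape, m.shape)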

Project: sand-glyphs | Author: inconvergent
def _get_glyph(gnum, height, width, shift_prob, shift_size):
  if isinstance(gnum, list):
    n = randint(*gnum)
  else:
    n = gnum

  glyph = random_points_in_circle(
      n, 0, 0, 0.5
      )*array((width, height), 'float')
  _spatial_sort(glyph)

  if random()<shift_prob:
    shift = ((-1)**randint(0,2))*shift_size*height
    glyph[:,1] += shift
  if random()<0.5:
    ii = randint(0,n-1,size=(1))
    xy = glyph[ii,:]
    glyph = row_stack((glyph, xy))


  return glyph
Project: iutils | Author: inconvergent
def main():

  from numpy.random import random
  from numpy.random import randint

  from iutils.render import Render
  from modules.linear import Linear

  render = Render(SIZE, BACK, FRONT)
  render.clear_canvas()

  nsteps = 500
  height = 1.0

  for i in range(20):

    start = random(size=(1,2))
    start_w = 0
    grains = randint(20,150)
    scale = 0.005 + random()*0.02
    L = Linear(SIZE, height, start, start_w)
    L.steps(nsteps, scale=scale)
    show(render, L, grains)

  render.write_to_png('./linear.png')
Project: mpiFFT4py | Author: spectralDNS
def test_FFT2(FFT2):
    N = FFT2.N
    if FFT2.rank == 0:
        A = random(N).astype(FFT2.float)

    else:
        A = zeros(N, dtype=FFT2.float)

    atol, rtol = (1e-10, 1e-8) if FFT2.float is float64 else (5e-7, 1e-4)
    FFT2.comm.Bcast(A, root=0)
    a = zeros(FFT2.real_shape(), dtype=FFT2.float)
    c = zeros(FFT2.complex_shape(), dtype=FFT2.complex)
    a[:] = A[FFT2.real_local_slice()]
    c = FFT2.fft2(a, c)
    B2 = zeros(FFT2.global_complex_shape(), dtype=FFT2.complex)
    B2 = rfft2(A, B2, axes=(0,1))
    assert allclose(c, B2[FFT2.complex_local_slice()], rtol, atol)
    a = FFT2.ifft2(c, a)
    assert allclose(a, A[FFT2.real_local_slice()], rtol, atol)
Project: sand-glyphs | Author: inconvergent
def random_points_in_circle(n,xx,yy,rr):
  """
  get n random points in a circle.
  """

  rnd = random(size=(n,3))
  t = TWOPI*rnd[:,0]
  u = rnd[:,1:].sum(axis=1)
  r = zeros(n,'float')
  mask = u>1.
  xmask = logical_not(mask)
  r[mask] = 2.-u[mask]
  r[xmask] = u[xmask]
  xyp = reshape(rr*r,(n,1))*column_stack( (cos(t),sin(t)) )
  dartsxy  = xyp + array([xx,yy])
  return dartsxy
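
Note: the fold above (r = 2-u wherever u = rnd[:,1] + rnd[:,2] exceeds 1) turns the triangular distribution of a sum of two uniforms into a radius with density 2r on [0, 1], which is exactly what uniform coverage of a disc by area requires. The same result is more often written with a square root; a minimal equivalent sketch, not taken from the project:

from numpy import sqrt, cos, sin, pi, column_stack
from numpy.random import random

def uniform_disc(n, xx, yy, rr):
  # rr*sqrt(u) has radial density 2r, i.e. uniform point density by area
  t = 2*pi*random(n)
  r = rr*sqrt(random(n))
  return column_stack((xx + r*cos(t), yy + r*sin(t)))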
Project: iutils | Author: inconvergent
def steps(self, nsteps=500, scale=0.01):

    xy = self.xy
    w = self.w
    one = self.one

    wstp = 0

    for i in range(1, nsteps):

      stp = array([[0,one]])*random()
      newxy = xy[i-1, :] - stp
      # neww = w[i-1] - (1.0-2.0*random())*one

      wstp += random()*one*scale
      neww = w[i-1] + wstp*random()

      xy[i,:] = newxy
      w[i] = neww
      self.itt += 1

      # neww = w[i-1] - random()*sqrt(i/100.0)*one


    return self.itt
Project: iutils | Author: inconvergent
def random_points_in_circle(n,xx,yy,rr):
  """
  get n random points in a circle.
  """


  rnd = random(size=(n,3))
  t = 2.*PI*rnd[:,0]
  u = rnd[:,1:].sum(axis=1)
  r = zeros(n,'float')
  mask = u>1.
  xmask = logical_not(mask)
  r[mask] = 2.-u[mask]
  r[xmask] = u[xmask]
  xyp = reshape(rr*r,(n,1))*column_stack( (cos(t),sin(t)) )
  dartsxy  = xyp + array([xx,yy])
  return dartsxy
Project: iutils | Author: inconvergent
def random_parallelogram(self, x1, y1, x2, y2, x3, y3, grains):

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    v1 = array((x2-x1, y2-y1))
    v2 = array((x3-x1, y3-y1))

    a1 = random((grains, 1))
    a2 = random((grains, 1))

    dd = v1*a1 + v2*a2

    dd[:, 0] += x1
    dd[:, 1] += y1

    for x, y in dd:
      rectangle(x, y, pix, pix)
      fill()
Project: iutils | Author: inconvergent
def random_triangle(self, x1, y1, x2, y2, x3, y3, grains):

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    v1 = array((x2-x1, y2-y1))
    v2 = array((x3-x1, y3-y1))

    a1 = random((2*grains, 1))
    a2 = random((2*grains, 1))

    mask = ((a1+a2)<1).flatten()

    ## discarding half the grains because i am too tired to figure out how to
    ## map the parallelogram to the triangle

    dd = v1*a1 + v2*a2

    dd[:, 0] += x1
    dd[:, 1] += y1

    for x, y in dd[mask, :]:
      rectangle(x, y, pix, pix)
      fill()
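
Note: the oversampling and masking above (2*grains draws, keep those with a1+a2 < 1) can be avoided entirely: reflecting a point of the far half of the parallelogram through (a1, a2) -> (1-a1, 1-a2) maps it into the triangle while preserving uniformity. A hedged sketch of that alternative, with p1, p2, p3 as length-2 arrays:

from numpy import where
from numpy.random import random

def uniform_triangle(p1, p2, p3, grains):
  # fold the half of the parallelogram with a1+a2 > 1 back into the triangle
  a1 = random((grains, 1))
  a2 = random((grains, 1))
  flip = (a1 + a2) > 1
  a1 = where(flip, 1.0 - a1, a1)
  a2 = where(flip, 1.0 - a2, a2)
  return p1 + a1*(p2 - p1) + a2*(p3 - p1)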
Project: iutils | Author: inconvergent
def random_circle(self, x1, y1, r, grains):
    """
    random points in circle. nonuniform distribution.
    """

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    the = random(grains)*pi*2
    rad = random(grains)*r

    xx = x1 + cos(the)*rad
    yy = y1 + sin(the)*rad

    for x, y in zip(xx, yy):
      rectangle(x, y, pix, pix)
      fill()
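
Note: rad = random(grains)*r is the nonuniformity the docstring warns about: scaling the radius linearly concentrates grains near the center. If uniform coverage were wanted, only the radius line would change (compare the uniform_disc sketch earlier on this page):

from numpy import sqrt
from numpy.random import random

rad = r*sqrt(random(grains))  # uniform by area; the linear form clusters toward (x1, y1)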
Project: iutils | Author: inconvergent
def sandstroke_non_linear(self,xys,grains=10,left=True):

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    dx = xys[:,2] - xys[:,0]
    dy = xys[:,3] - xys[:,1]

    aa = arctan2(dy,dx)
    directions = column_stack([cos(aa),sin(aa)])

    dd = sqrt(square(dx)+square(dy))

    for i,d in enumerate(dd):
      rnd = sqrt(random((grains,1)))
      if left:
        rnd = 1.0-rnd

      for x,y in xys[i,:2] + directions[i,:]*rnd*d:
        rectangle(x,y,pix,pix)
        fill()
Project: iutils | Author: inconvergent
def get_colors_from_file(self, fn):

    import Image
    from numpy.random import shuffle

    def p(f):
      return float('{:0.5f}'.format(f))

    scale = 1./255.
    im = Image.open(fn)
    w, h = im.size
    rgbim = im.convert('RGB')
    res = []
    for i in range(0, w):
      for j in range(0, h):
        r, g, b = rgbim.getpixel((i, j))
        res.append([p(r*scale), p(g*scale), p(b*scale)])

    shuffle(res)

    self.colors = res
    self.ncolors = len(res)
Project: differential-line-cuda | Author: inconvergent
def spawn_curl(self, limit, prob=0.01, t=None):

    links = self.links
    link_len = self.link_len
    xy = self.xy
    num = self.num

    curve = sqrt(self.link_curv[1:num,0])
    for i, (r, t) in enumerate(zip(random(num), curve)):

      b = links[i,1]

      if r>t and link_len[i,1]>limit:

        newxy = (xy[b,:]+xy[i,:])*0.5
        xy[num,:] = newxy

        links[i,1] = num
        links[num,0] = i
        links[num,1] = b
        links[b,0] = num
        num += 1

    self.num = num
Project: iCount | Author: tomazc
def make_fasta_file(sequences=None, headers=None, out_file=None, num_sequences=10, seq_len=80,
                    rnd_seed=None):
    """Make artificial FASTA file."""
    random.seed(rnd_seed)  # pylint:disable=no-member
    if sequences is None and headers is None:
        headers = ['{}'.format(i + 1) for i in range(num_sequences)]
        random_seeds = random.randint(10**5, size=num_sequences)  # pylint:disable=no-member
        sequences = [make_sequence(seq_len, rnd_seed=rnd) for rnd in random_seeds]
    elif sequences is None:
        random_seeds = random.randint(10**5, size=len(headers))  # pylint:disable=no-member
        sequences = [make_sequence(seq_len, rnd_seed=rnd) for rnd in random_seeds]
    elif headers is None:
        headers = ['{}'.format(i + 1) for i in range(len(sequences))]

    if out_file is None:
        out_file = get_temp_file_name(extension='fasta')
    with open(out_file, 'wt') as ofile:
        for header, seq in zip(headers, sequences):
            ofile.write('>' + header + '\n')
            ofile.write(seq + '\n')

    return os.path.abspath(out_file)
Project: CSB | Author: csb-toolbox
def sample(self):

        from numpy.random import random
        from numpy import add
        from csb.numeric import log_sum_exp

        log_m = self.log_masses()
        log_M = log_sum_exp(log_m)
        c = add.accumulate(exp(log_m - log_M))
        u = random()
        j = (u > c).sum()

        a = self.dh[j]
        z = self.z()

        xmin, xmax = z[j], z[j + 1]

        u = random()

        if a > 0:
            return xmax + log(u + (1 - u) * exp(-a * (xmax - xmin))) / a
        else:
            return xmin + log(u + (1 - u) * exp(a * (xmax - xmin))) / a
Project: CSB | Author: csb-toolbox
def sample(self, maxiter=100):

        from numpy.random import random

        for i in range(maxiter):

            x = self.hull.sample()
            l = self.hull.l(x)
            u = self.hull.u(x)
            w = random()

            if w <= exp(l - u): return x

            h, dh = self.logp(x)

            if w <= exp(h - u): return x

            self.hull.insert(x, h, dh)
Project: CSB | Author: csb-toolbox
def sample_sphere3d(radius=1., n_samples=1):
    """
    Sample points from 3D sphere.

    @param radius: radius of the sphere
    @type radius: float

    @param n_samples: number of samples to return
    @type n_samples: int

    @return: n_samples times random cartesian coordinates inside the sphere
    @rtype: numpy array
    """
    from numpy.random  import random
    from numpy import arccos, transpose, cos, sin, pi, power

    r = radius * power(random(n_samples), 1 / 3.)
    theta = arccos(2. * (random(n_samples) - 0.5))
    phi = 2 * pi * random(n_samples)

    x = cos(phi) * sin(theta) * r
    y = sin(phi) * sin(theta) * r
    z = cos(theta) * r

    return transpose([x, y, z])
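
Note: this is inverse-transform sampling in spherical coordinates: power(random(n_samples), 1/3.) makes the enclosed volume uniform (volume grows like r**3), and arccos(2u - 1) makes cos(theta) uniform so solid angle is covered evenly. A quick sanity check, assuming sample_sphere3d as defined above:

import numpy as np

pts = sample_sphere3d(radius=2.0, n_samples=100000)
r = np.sqrt((pts**2).sum(axis=1))
# the fraction of samples inside half the radius should be close to (1/2)**3
print(r.max() <= 2.0, abs((r < 1.0).mean() - 0.125) < 0.01)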
Project: CSB | Author: csb-toolbox
def sample_from_histogram(p, n_samples=1):
    """
    Return indices of bins sampled according to the histogram p.

    @param p: histogram
    @type p: numpy.array
    @param n_samples: number of samples to generate
    @type n_samples: integer
    """

    from numpy import add, less, argsort, take, arange
    from numpy.random import random

    indices = argsort(p)
    indices = take(indices, arange(len(p) - 1, -1, -1))

    c = add.accumulate(take(p, indices)) / add.reduce(p)

    return indices[add.reduce(less.outer(c, random(n_samples)), 0)]
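
Note: less.outer(c, random(n_samples)) compares every cumulative weight with every uniform draw, and add.reduce(..., 0) counts how many weights fall below each draw; that count is the sampled bin. The same inverse-CDF lookup is usually written with searchsorted; a sketch assuming p is a numpy array:

import numpy as np

def sample_from_histogram_v2(p, n_samples=1):
    # sort bins by descending weight (as above), build the CDF, then invert it
    order = np.argsort(p)[::-1]
    c = np.cumsum(p[order])/np.sum(p)
    return order[np.searchsorted(c, np.random.random(n_samples))]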
Project: CSB | Author: csb-toolbox
def gen_inv_gaussian(a, b, p, burnin=10):
    """
    Sampler based on Gibbs sampling.
    Assumes scalar p.
    """
    from numpy.random import gamma
    from numpy import sqrt

    s = a * 0. + 1.

    if p < 0:
        a, b = b, a

    for i in range(burnin):

        l = b + 2 * s
        m = sqrt(l / a)

        x = inv_gaussian(m, l, shape=m.shape)
        s = gamma(abs(p) + 0.5, x)

    if p >= 0:
        return x
    else:
        return 1 / x
Project: Personal_AI_Assistant | Author: PratylenClub
def add_list_of_words_in_w2v_model(self, unknown_words):
        huge_w2v_model_file = open(self.w2v_huge_model_path, "r")
        current_w2v_model_file = open(self.w2v_model_path, "a")
        line = huge_w2v_model_file.readline()
        unknown_words_left = len(unknown_words)
        while line and unknown_words_left:
            word = line.split()[0]
            if word in unknown_words:
                current_w2v_model_file.write(line)
                unknown_words = unknown_words - set([word])
                unknown_words_left -= 1
            line = huge_w2v_model_file.readline()
        for word in list(unknown_words):
            random_position = random(self.w2v_model.vector_size)*2-1
            current_w2v_model_file.write(" ".join(([word]+[str(x) for x in random_position])))
            print "warning random positions introduced for new words ... in the future this should be solved"
        current_w2v_model_file.close()
        huge_w2v_model_file.close()
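
Note: random(n)*2 - 1 is the usual affine rescaling of [0, 1) draws; in general a + (b - a)*random(n) is uniform on [a, b). A minimal sketch, with dim as a hypothetical stand-in for w2v_model.vector_size:

from numpy.random import random

dim = 300                    # hypothetical vector size
v = 2.0*random(dim) - 1.0    # uniform on [-1.0, 1.0)
w = -0.5 + 1.5*random(dim)   # general form a + (b - a)*random(n), here [a, b) = [-0.5, 1.0)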
Project: isp-data-pollution | Author: essandess
def add_url_links(self,links,url=''):
        k = 0
        for link in sorted(links,key=lambda k: random.random()):
            lp = uprs.urlparse(link)
            if (lp.scheme == 'http' or lp.scheme == 'https') and not self.blacklisted(link):
                if self.add_link(link): k += 1
                if k > self.max_links_per_page: break
        if self.verbose or self.debug:
            current_url = url  # default
            try:
                @self.phantomjs_short_timeout
                def phantomjs_current_url(): return self.driver.current_url
                current_url = phantomjs_current_url()
                # the current_url method breaks on a lot of sites, e.g.
                # python3 -c 'from selenium import webdriver; driver = webdriver.PhantomJS(); driver.get("https://github.com"); print(driver.title); print(driver.current_url); driver.quit()'
            except Exception as e:
                if self.debug: print('.current_url exception:\n{}'.format(e))
        if self.debug:
            print("{}: {:d} links added, {:d} total, {:.1f} bits domain entropy".format(current_url,k,self.link_count(),self.domain_entropy()))
        elif self.verbose:
            self.print_progress(current_url,num_links=k)
Project: sand-spline | Author: inconvergent
def __next__(self):
    try:
      g = next(self.guide)
    except Exception:
      raise StopIteration

    pnum = self.pnum

    r = 1.0-2.0*random(pnum)
    self.noise[:] += r*self.scale

    a = random(pnum)*TWOPI
    rnd = column_stack((cos(a), sin(a)))

    self.path += rnd * reshape(self.noise, (self.pnum,1))
    self.interpolated_path = _rnd_interpolate(self.path, self.inum, ordered=ORDERED)

    self.i+=1
    return g + self.interpolated_path
Project: sand-spline | Author: inconvergent
def get_colors(f, do_shuffle=True):
  from numpy import array
  try:
    import Image
  except Exception:
    from PIL import Image

  im = Image.open(f)
  data = array(list(im.convert('RGB').getdata()),'float')/255.0

  res = []
  for rgb in data:
    res.append(list(rgb))

  if do_shuffle:
    from numpy.random import shuffle
    shuffle(res)
  return res
Project: TicTacTio | Author: DevelopForLizardz
def __init__(self, layers=None, fitness=0):
        """
        Create the neural net.
        :param layers: A nested array: [[Input Layer neurons], [Hidden Layer neurons], [Output Layer neurons]] that is
        used to specify layers containing neurons to use instead of creating random ones.
        :param fitness: Used to specify fitness to start out with
        :return: None
        """

        self.NUMINPUT = 10
        self.NUMHIDDEN = 9
        self.NUMOUTPUT = 9
        self.layers = layers if layers is not None else self.create()
        self.inputLayer, self.hiddenLayer, self.outputLayer = self.layers[:]
        self.fitness = fitness  # this is a placeholder for when it is in a population.
        self.mutateChances = [0.05,  # 5% chance of executing mutate task 1
                              47.55,  # 47.5% chance of executing mutate task 2
                              1]  # 47.5% chance of executing mutate task 3
Project: ccvt | Author: inconvergent
def sample_from_dens(dens, n):

  m = dens.shape[0]
  res = zeros((n,2),'float')
  k = 0

  while k<n:

    xy = random(2)
    ij = floor(xy*m).astype(int)  # cast to int: float arrays cannot index dens
    d = dens[ij[0], ij[1]]
    if random()<d:
      res[k,:] = xy
      k += 1

  return res
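
Note: sample_from_dens is rejection sampling: a candidate point is accepted with probability equal to its grid cell's density, assumed to lie in [0, 1]. A vectorised sketch of the same idea that draws candidates in batches:

from numpy import floor, zeros
from numpy.random import random

def sample_from_dens_batch(dens, n, batch=1024):
  m = dens.shape[0]
  res = zeros((n, 2), 'float')
  k = 0
  while k < n:
    xy = random((batch, 2))
    ij = floor(xy*m).astype(int)
    keep = xy[random(batch) < dens[ij[:, 0], ij[:, 1]]]
    take = keep[:n - k]
    res[k:k + len(take), :] = take
    k += len(take)
  return res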
Project: smarty | Author: openforcefield
def change_bond(self,env,bond):
        """
        Makes changes to the Bond object
        returns probability of making change
        """
        #TODO: if symmetry in change_atom works for AlkEthOH, figure out how to handle bonds
        # Can only make changes to the bond OR or AND types
        changeOR = random.choice([True, False], p = [0.7, 0.3])
        if changeOR:
            # Bonds only have OR bases (no ORdecorators)
            new_prob = self.change_ORbase(bond, self.BondORbases, self.BondORdecorators)
            return 0.7 * new_prob

        else: # change AND type
            new_prob = self.change_ANDdecorators(bond, self.BondANDdecorators)
            return new_prob * 0.3
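
Note: the weighted draw relies on numpy.random.choice's p= argument; for a two-way 0.7/0.3 split, a plain random() threshold is equivalent. A sketch of both forms:

import numpy.random as random

changeOR = random.choice([True, False], p=[0.7, 0.3])  # weighted two-way draw
changeOR = random.random() < 0.7                       # equivalent threshold test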
Project: sand-dunes | Author: inconvergent
def get_colors(f, do_shuffle=True):
  from numpy import array
  try:
    import Image
  except Exception:
    from PIL import Image

  im = Image.open(f)
  data = array(list(im.convert('RGB').getdata()),'float')/255.0

  res = []
  for rgb in data:
    res.append(list(rgb))

  if do_shuffle:
    from numpy.random import shuffle
    shuffle(res)
  return res
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_comparison_invalid(self):

        def check(df, df2):

            for (x, y) in [(df, df2), (df2, df)]:
                self.assertRaises(TypeError, lambda: x == y)
                self.assertRaises(TypeError, lambda: x != y)
                self.assertRaises(TypeError, lambda: x >= y)
                self.assertRaises(TypeError, lambda: x > y)
                self.assertRaises(TypeError, lambda: x < y)
                self.assertRaises(TypeError, lambda: x <= y)

        # GH4968
        # invalid date/int comparisons
        df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
        df['dates'] = date_range('20010101', periods=len(df))

        df2 = df.copy()
        df2['dates'] = df['a']
        check(df, df2)

        df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
        df2 = DataFrame({'a': date_range('20010101', periods=len(
            df)), 'b': date_range('20100101', periods=len(df))})
        check(df, df2)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_timestamp_compare(self):
        # make sure we can compare Timestamps on the right AND left hand side
        # GH4982
        df = DataFrame({'dates1': date_range('20010101', periods=10),
                        'dates2': date_range('20010102', periods=10),
                        'intcol': np.random.randint(1000000000, size=10),
                        'floatcol': np.random.randn(10),
                        'stringcol': list(tm.rands(10))})
        df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
        ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
               'ne': 'ne'}
        for left, right in ops.items():
            left_f = getattr(operator, left)
            right_f = getattr(operator, right)

            # no nats
            expected = left_f(df, Timestamp('20010109'))
            result = right_f(Timestamp('20010109'), df)
            assert_frame_equal(result, expected)

            # nats
            expected = left_f(df, Timestamp('nat'))
            result = right_f(Timestamp('nat'), df)
            assert_frame_equal(result, expected)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda | Author: SignalMedia
def test_arith_non_pandas_object(self):
        df = self.simple

        val1 = df.xs('a').values
        added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
        assert_frame_equal(df + val1, added)

        added = DataFrame((df.values.T + val1).T,
                          index=df.index, columns=df.columns)
        assert_frame_equal(df.add(val1, axis=0), added)

        val2 = list(df['two'])

        added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
        assert_frame_equal(df + val2, added)

        added = DataFrame((df.values.T + val2).T, index=df.index,
                          columns=df.columns)
        assert_frame_equal(df.add(val2, axis='index'), added)

        val3 = np.random.rand(*df.shape)
        added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
        assert_frame_equal(df.add(val3), added)
Project: differential-lattice | Author: inconvergent
def spawn_circle(dl, n, xy, dst, rad=0.4):

  from numpy.random import random
  from numpy import pi
  from numpy import column_stack
  from numpy import sin
  from numpy import cos

  num = dl.num
  theta = random(n)*2*pi
  new_xy = xy + column_stack([cos(theta), sin(theta)])*rad
  new_num = len(new_xy)
  if new_num>0:
    dl.xy[num:num+new_num,:] = new_xy

  dl.num += new_num
  return new_num
Project: sGLMM | Author: YeWenting
def setXY(self, X, y):
        self.X = X
        self.y = y
        row = X.shape[1]
        col = y.shape[1]
        #random.seed(6)
        #self.beta = random.random(size=(row, col))
        #self.beta = 2 * self.beta - 1
        # self.beta=np.loadtxt('../toyData/lmm_tree_beta.csv', delimiter=',') # p.loadtxt('../toyData/group_beta.csv', delimiter=',')
        self.beta=np.zeros((row,col))
        # L1, L2 = np.linalg.eigh(X.T.dot(X))
        # L1 = L1.max()
        #if self.maxEigen is None:
        s= np.linalg.svd(self.X, full_matrices=False)[1]
        L1 = np.max(s)
        L1 = L1*L1
        #else:
            #L1 = self.maxEigen
        self.L = L1
Project: sGLMM | Author: YeWenting
def update_edge_vertex_matrix(self):
        self.edge_vertex_matrix = random.random(size=(self.get_num_edges(), self.beta.shape[0]))
        self.edge_vertex_matrix = 2. * self.edge_vertex_matrix - 1
        # self.edge_vertex_matrix=np.zeros((self.get_num_edges(),self.beta.shape[0]))
        num_rows = self.corr_coff.shape[0]
        num_cols = self.corr_coff.shape[1]
        sign = 1
        present_row = 0
        for i in range(0, num_rows):
            for j in range(0, num_cols):
                if self.corr_coff[i, j] == 0:
                    continue
                if self.corr_coff[i, j] < 0:
                    sign = -1
                elif self.corr_coff[i, j] > 0:
                    sign = 1
                for k in range(0, self.beta.shape[0]):
                    if k == i:
                        self.edge_vertex_matrix[present_row, k] = abs(self.corr_coff[i, j]) * self.gamma_flasso
                    elif k == j:
                        self.edge_vertex_matrix[present_row, k] = -sign * abs(self.corr_coff[i, j]) * self.gamma_flasso

                present_row = present_row + 1
Project: pyson | Author: niklasf
def do_work(self, term, intention):
        category = random.choice(CATEGORY_NAMES)
        c, d = self.compute_change_probability()

        if schedule.tick >= START_TICK_LAST_STAGE:
            num_deleted = numpy.random.geometric(min(1, 1 / d * 2.2))
            num_updated = numpy.random.geometric(0.35)
            num_created = numpy.random.geometric(min(1, 1 / c))
        else:
            num_deleted = numpy.random.geometric(min(1, 1 / d * 1.6))
            num_updated = numpy.random.geometric(0.425)
            num_created = numpy.random.geometric(min(1, 1 / c * 1.7475))

        changed = []
        self.delete_files(num_deleted, category)
        changed += self.update_files(num_updated, category)
        changed += self.create_files(num_created, category)

        self.create_coupling(changed)

        self.bugfix()
        yield
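
Note: numpy.random.geometric(p) returns the number of trials up to and including the first success, so its mean is 1/p; the min(1, ...) guards clamp the derived rates into geometric's valid domain, 0 < p <= 1. A short check:

import numpy

p = min(1, 1/3.7)                              # clamp a derived rate into (0, 1]
draws = numpy.random.geometric(p, size=100000)
print(draws.mean())                            # close to 1/p = 3.7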
Project: pyson | Author: niklasf
def do_some_work(self):
        random_day = self.get_random_day()
        category = random.choice(CATEGORY_NAMES)
        c, d = self.compute_change_probability()

        if random_day <= self.days:
            if schedule.tick >= START_TICK_LAST_STAGE:
                num_deleted = numpy.random.geometric(min(1, 1 / d * 1.1))
                num_updated = numpy.random.geometric(0.15)
                num_created = numpy.random.geometric(1 / c * 0.95)
            else:
                num_deleted = numpy.random.geometric(min(1, 1 / d))
                num_updated = numpy.random.geometric(0.25)
                num_created = numpy.random.geometric(1 / c * 0.6)

            changed = []
            self.delete_files(num_deleted, category)
            changed += self.update_files(num_updated, category)
            changed += self.create_files(num_created, category)

            self.create_coupling(changed)

            self.bugfix()
        elif random_day <= 6:
            self.intention_bugfix()
Project: pyson | Author: niklasf
def do_some_work(self):
        random_day = self.get_random_day()
        category = random.choice(CATEGORY_NAMES)
        c, d = self.compute_change_probability()

        if random_day <= self.days:
            num_deleted = numpy.random.geometric(min(1, 1 / d * 0.975))
            num_updated = numpy.random.geometric(0.25)
            num_created = numpy.random.geometric(1 / c * 0.9)

            changed = []
            self.delete_files(num_deleted, category)
            changed += self.update_files(num_updated, category)
            changed += self.create_files(num_created, category)

            self.create_coupling(changed)

            self.bugfix()
        elif random_day <= 3:
            num_updated = numpy.random.geometric(0.26)
            updated = self.update_files(num_updated, category)
            self.create_coupling(updated)
        elif random_day <= 4:
            self.intention_bugfix()
Project: machineLearning | Author: DiginessForever
def __init__(self, folderNames):
        '''This creates a neural network and puts input into it, runs training, and saves the memory/weight matrix'''
        #Constructor for neural network has these parameters:  numberOfLayers, numberOfNeuronsPerLayer, totalClasses - in that order.
        #Each layer will have progressively less neurons from the input layer to the output layer.  The output layer will have only one
        #neuron per type of object it needs to recognize (the class).
        totalClasses = len(folderNames)
        numberOfNeuronsPerLayer = 100
        neuralNetwork = self.NeuralNetwork(10, numberOfNeuronsPerLayer, totalClasses)

        #TO DO - here is the continuance point...


    #This method will create a new network and train it - something to couple training examples with their proper output.
    #I've already gotten the images arranged in folders by classes - the class comes from the Google image search term.
    #So whatever the folder name is, that should be the expected output.  However many folders I have, I should have that many neurons in the
    #output layer.
    #The training manager then would just grab images at random from the various folders, and in calculating the error, it would expect that neuron
    #representing that folder's class to be 1, and the other neurons to be 0.
Project: machineLearning | Author: DiginessForever
def trainToConvergence():
        network = NeuralNetwork()
        network.addLayer(inputLayer)

        for x in range(inputLayer.len, 2, 100):
            network.addLayer(x)

        #Loop until error is under a given threshold or time spent is over a given threshold:
        #2. Run imageConvert.py in order to get an image (or batch) from a random folder.
        #3. Set the expected output to be the output neuron for that folder/class = 1, and all other output neurons 0.
        #4. Move the image into the input layer (imageConvert.py should have it ready to go).
        #5. Forward propagate.
        #6. Calculate error (easy, considering #3).
        #7. Print error.
        #8. Backpropagate.
        #When finished looping, save the weights.

    #Once I have a network trained and the weights saved to file, I can grab the weights and do
    #classifying on an arbitrary image to see if it matches any of my trained classes:
Project: alib | Author: vnep-approx
def _calculate_average_resource_demands(self):
        connection_probability = self._raw_parameters["probability"]
        min_number_nodes = self._raw_parameters["min_number_of_nodes"]
        max_number_nodes = self._raw_parameters["max_number_of_nodes"]
        number_of_requests = self._raw_parameters["number_of_requests"]
        node_res = self._raw_parameters["node_resource_factor"]
        edge_res_factor = self._raw_parameters["edge_resource_factor"]

        average_number_of_nodes_in_core = ((min_number_nodes + max_number_nodes) / 2.0)

        expected_number_of_request_nodes = (float(number_of_requests) * (2 + average_number_of_nodes_in_core))  # add 2 for source & sink
        expected_number_of_request_edges = (float(number_of_requests) * (
            # edges of the main service chain:
            (average_number_of_nodes_in_core - 1 + 2) +
            # edges from random connections:
            connection_probability * (average_number_of_nodes_in_core * (average_number_of_nodes_in_core - 2) + 1)
        ))

        # TODO: this code assumes that all node types are evenly distributed in the request!
        expected_number_of_request_nodes_per_node_type = expected_number_of_request_nodes / float(len(self._node_types))
        self.average_request_node_resources_per_type = {}
        for node_type in self._node_types:
            self.average_request_node_resources_per_type[node_type] = node_res * (self._substrate.get_total_node_resources(node_type) / expected_number_of_request_nodes_per_node_type)

        self.average_request_edge_resources = (1.0 / edge_res_factor) * self._substrate.get_total_edge_resources() / expected_number_of_request_edges
Project: alib | Author: vnep-approx
def _add_cactus_edges(self, req):
        sub_trees = [(req.graph["root"], list(req.nodes))]
        cycles = 0
        edges_on_cycle = set()
        while sub_trees and (cycles < self._raw_parameters["max_cycles"]):
            cycles += 1
            root_node, sub_tree = sub_trees.pop()
            i = random.choice(sub_tree)
            j = random.choice(sub_tree)
            while i == j or (i in req.get_out_neighbors(j)) or (j in req.get_out_neighbors(i)):
                i = random.choice(sub_tree)
                j = random.choice(sub_tree)
            if req.node[i]["layer"] > req.node[j]["layer"]:
                i, j = j, i  # make edges always point down the tree
            if random.random() < self._raw_parameters["probability"]:
                req.add_edge(i, j, self._edge_demand)
                edges_on_cycle.add((i, j))

                path_i = CactusRequestGenerator._path_to_root(req, i, root_node)
                path_j = CactusRequestGenerator._path_to_root(req, j, root_node)
                new_cycle = path_i.symmetric_difference(path_j)  # only edges on the path to the first common ancestor lie on cycle
                edges_on_cycle = edges_on_cycle.union(new_cycle)

                sub_trees = CactusRequestGenerator._list_nontrivial_allowed_subtrees(req, edges_on_cycle)
                random.shuffle(sub_trees)
Project: alib | Author: vnep-approx
def _empirical_number_of_nodes_edges(self):
        total_nodes = 0
        total_edges = 0
        self._node_demand_by_type = {nt: 0.0 for nt in self._node_types}
        self._edge_demand = 0.0
        r_state = random.getstate()
        iterations = self._raw_parameters["iterations"]
        for i in range(iterations):
            req = self._generate_tree("test")
            self._add_cactus_edges(req)
            # print len(req.nodes)
            total_nodes += len(req.nodes)
            total_edges += len(req.edges)
        random.setstate(r_state)
        total_nodes /= float(iterations)
        total_edges /= float(iterations)
        self.logger.info("Expecting {} nodes, {} edges".format(total_nodes, total_edges))
        return total_nodes, total_edges
Project: alib | Author: vnep-approx
def generate_and_apply_profits(self, scenario, raw_parameters):
        self._scenario = scenario
        self._iterations = raw_parameters["iterations"]
        self.logger.info("Calculating vnet profits based on random embedding ({} iterations)".format(self._iterations))

        if not self._scenario.substrate.shortest_paths_costs:
            self._scenario.substrate.initialize_shortest_paths_costs()

        for req in self._scenario.requests:
            cost = self._get_average_cost_from_embedding_graph_randomly(
                req, self._scenario.substrate.shortest_paths_costs
            )
            req.profit = -cost * raw_parameters["profit_factor"]
            self.logger.debug("\t{}\t{}".format(req.name, req.profit))

        self._iterations = None
        self._scenario = None
Project: mpiFFT4py | Author: spectralDNS
def test_FFT(FFT):
    N = FFT.N
    if FFT.rank == 0:
        A = random(N).astype(FFT.float)
        if FFT.communication == 'AlltoallN':
            C = empty(FFT.global_complex_shape(), dtype=FFT.complex)
            C = rfftn(A, C, axes=(0,1,2))
            C[:, :, -1] = 0  # Remove Nyquist frequency
            A = irfftn(C, A, axes=(0,1,2))
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
        B2 = rfftn(A, B2, axes=(0,1,2))

    else:
        A = zeros(N, dtype=FFT.float)
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)

    atol, rtol = (1e-10, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    FFT.comm.Bcast(A, root=0)
    FFT.comm.Bcast(B2, root=0)

    a = zeros(FFT.real_shape(), dtype=FFT.float)
    c = zeros(FFT.complex_shape(), dtype=FFT.complex)
    a[:] = A[FFT.real_local_slice()]
    c = FFT.fftn(a, c)
    #print abs((c - B2[FFT.complex_local_slice()])/c.max()).max()
    assert all(abs((c - B2[FFT.complex_local_slice()])/c.max()) < rtol)
    #assert allclose(c, B2[FFT.complex_local_slice()], rtol, atol)
    a = FFT.ifftn(c, a)
    #print abs((a - A[FFT.real_local_slice()])/a.max()).max()

    assert all(abs((a - A[FFT.real_local_slice()])/a.max()) < rtol)
    #assert allclose(a, A[FFT.real_local_slice()], rtol, atol)
Project: ochem_predict_nn | Author: connorcoley
def main(db_fpath, N = 15):
    '''Read reactions from Lowe's patent reaction SMILES'''

    try:
        # Open file
        file_generator = get_reaction_file(db_fpath)
        print(file_generator)
        documents = []
        for i, rxn in enumerate(file_generator):
            if i == N:
                break

            print('~~~~~~~ {} ~~~~~~'.format(i))
            print('{}: {}'.format(i, rxn))
            document = minidom.parse(rxn)
            try:
                dic = doc_to_dic(document)
                dic['random'] = random()
                documents.append(dic)
            except ValueError as e:
                print(e)

            # Report progress and insert every 1000
            if ((i+1) % 1000) == 0:
                print('{}/{}'.format(i+1, N))
                result = collection.insert(documents)
                documents = []

        if documents: result = collection.insert(documents)
    except KeyboardInterrupt:
        print('Stopped early!')     

    print('Created {} database entries'.format(collection.find().count()))

    return True
Project: automata | Author: inconvergent
def _diminish(self, prob):
    # ii,jj = logical_and(self.connected>7, self.grid).nonzero()
    # self.grid[ii, jj] = False

    ii,jj = logical_and(self.neigh>self.crowded_limit, self.grid).nonzero()
    self.grid[ii, jj] = False

    # diminish_mask = random(size=len(ii))<prob
    # self.grid[ii[diminish_mask], jj[diminish_mask]] = False

    # ii,jj = self.grid.nonzero()
    # diminish_mask = random(size=len(ii))<0.01
    # self.grid[ii[diminish_mask], jj[diminish_mask]] = False
Project: comprehend | Author: Fenugreek
def test_coder(coder, sample, corruption=.3, block_corruption=.2, random_seed=123,
               **kwargs):
    """
    Test a neural network on MNIST sample. Returns a list of
    (input, nn output) images.

    coder:
    Trained neural network object, that supports the recode() method.

    sample:
    MNIST sample for testing, e.g. as returned by mnist_sample.

    corruption:
    Perform a test after corrupting sample by this fraction of pixels,
    which are randomly picked and set to 0.

    block_corruption:
    As before, but corruption is a random rectangle of this size.
    """

    if random_seed is not None: np.random.seed(random_seed)
    if type(sample) == list:
        return [(s, coder.recode(s[0], **kwargs).eval()) for s in sample]

    results = [(sample, coder.recode(sample, **kwargs).eval())]

    if corruption is not None:
        corrupted = train.corrupt(sample, corruption)
        results.append((corrupted, coder.recode(corrupted, **kwargs).eval()))

    if block_corruption is not None:
        corrupted = block_corrupt(sample, block_corruption)
        results.append((corrupted, coder.recode(corrupted, **kwargs).eval()))

    return results
Project: comprehend | Author: Fenugreek
def block_corrupt(dataX, corruption_level=.1):
    """
    Return a copy of dataX MNIST images after corrupting each row with
    a rectangle of size corruption_level.
    """

    count = len(dataX)
    size = dataX[0].size
    length = int(np.sqrt(size))
    corrupt_area = corruption_level * size

    breadths = randint(1, int(np.sqrt(corrupt_area)), count)
    lengths = (corrupt_area / breadths).astype(int)
    switch = randint(0, 2, count)
    breadths[switch==0] = lengths[switch==0]
    lengths = (corrupt_area / breadths).astype(int)

    loc_x = randint(0, length, count)
    loc_y = randint(0, length, count)

    corruptX = np.zeros(dataX.shape, dtype=dataX.dtype)
    for i, img in enumerate(dataX):
        bi, li = breadths[i], lengths[i]
        ind_x = np.arange(loc_x[i], loc_x[i] + bi, dtype=int) % length
        ind_y = np.arange(loc_y[i], loc_y[i] + li, dtype=int) % length
        corrupted = img.copy().reshape((length, length))
        corrupted[(np.tile(ind_x, li),
                   np.repeat(ind_y, bi))] = random(bi * li)
#                                         = np.zeros(bi * li)
        corruptX[i] = corrupted.reshape(img.shape)

    return corruptX
Project: fracture-cuda | Author: inconvergent
def blow(self, n, xy):
    a = random(size=n)*TWOPI
    dxy = column_stack((
        cos(a),
        sin(a)
        ))

    new_nodes = self._add_nodes(xy)
    self._add_fracs(dxy, new_nodes)
Project: sand-glyphs | Author: inconvergent
def get_word_generator():
  def word_generator():
    while True:
      word = []
      while random()>0.15:
        r = (0.9 + random()*1.1)*GLYPH_WIDTH
        word.append(r)
      if len(word)>2:
        yield word
  return word_generator
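
Note: the while random() > 0.15 loop gives each word a geometric number of glyph widths, mean 0.85/0.15 ≈ 5.7 before the len > 2 filter. numpy can draw that length directly; a sketch, with GLYPH_WIDTH taken from the surrounding module:

from numpy.random import geometric, random

n = geometric(0.15) - 1  # numpy counts the terminating trial too, hence the -1
word = [(0.9 + random()*1.1)*GLYPH_WIDTH for _ in range(n)]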
Project: sand-glyphs | Author: inconvergent
def _interpolate_write_with_cursive(glyphs, inum, theta, noise, offset_size):
  stack = row_stack(glyphs)
  ig = _rnd_interpolate(stack, len(glyphs)*inum, ordered=True)
  gamma = theta + cumsum((1.0-2.0*random(len(ig)))*noise)
  dd = column_stack((cos(gamma), sin(gamma)))*offset_size
  a = ig + dd
  b = ig + dd[:,::-1]*array((1,-1))

  return a, b