Python numpy.random module: randint() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.random.randint().
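A quick orientation before the examples: numpy.random.randint(low, high=None, size=None) draws integers from the half-open interval [low, high); when high is omitted, it draws from [0, low). This differs from the standard library's random.randint, whose upper bound is inclusive. A minimal sketch of the calls the snippets below build on:

import numpy as np

np.random.randint(0, 10, size=5)      # five ints from [0, 10); 10 is never drawn
np.random.randint(2)                  # a single draw from {0, 1}
np.random.randint(1, 7, size=(2, 3))  # a 2x3 array of dice rolls

# NumPy 1.17+ recommends the Generator API for new code:
rng = np.random.default_rng(0)
rng.integers(0, 10, size=5)           # Generator.integers replaces randint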

Project: IRL-maxent    Author: harpribot
def optimal_policy(self, state_int):
        """
        The optimal policy for this gridworld.

        state_int: What state we are in. int.
        -> Action int.
        """

        sx, sy = self.int_to_point(state_int)

        if sx < self.grid_size - 1 and sy < self.grid_size - 1:
            # both actions make progress; pick one at random
            # (without the -1 this branch always fires and the edge cases
            # below are unreachable)
            return rn.randint(0, 2)
        if sx < self.grid_size-1:
            return 0
        if sy < self.grid_size-1:
            return 1
        raise ValueError("Unexpected state.")
Project: sand-glyphs    Author: inconvergent
def _get_glyph(gnum, height, width, shift_prob, shift_size):
  if isinstance(gnum, list):
    n = randint(*gnum)
  else:
    n = gnum

  glyph = random_points_in_circle(
      n, 0, 0, 0.5
      )*array((width, height), 'float')
  _spatial_sort(glyph)

  if random()<shift_prob:
    # random vertical shift sign: (-1)**0 == 1, (-1)**1 == -1
    shift = ((-1)**randint(0,2))*shift_size*height
    glyph[:,1] += shift
  if random()<0.5:
    # duplicate one random point of the glyph
    ii = randint(0,n-1,size=(1))
    xy = glyph[ii,:]
    glyph = row_stack((glyph, xy))

  return glyph
Project: latplan    Author: guicho271828
def random_walk_rec(current, trace, length, successor_fn):
    import numpy as np
    import numpy.random as random
    if length == 0:
        return current
    else:
        sucs = successor_fn(current)
        first = random.randint(len(sucs))
        now = first

        while True:
            suc = sucs[now]
            try:
                assert not np.any([np.all(np.equal(suc, t)) for t in trace])
                result = random_walk_rec(suc, [*trace, suc], length-1, successor_fn)
                assert result is not None
                return result
            except AssertionError:
                now = (now+1)%len(sucs)
                if now == first:
                    print("B",end="")
                    return None
                else:
                    continue
Project: iutils    Author: inconvergent
def main():

  from numpy.random import random
  from numpy.random import randint

  from iutils.render import Render
  from modules.linear import Linear

  render = Render(SIZE, BACK, FRONT)
  render.clear_canvas()

  nsteps = 500
  height = 1.0

  for i in range(20):

    start = random(size=(1,2))
    start_w = 0
    grains = randint(20,150)
    scale = 0.005 + random()*0.02
    L = Linear(SIZE, height, start, start_w)
    L.steps(nsteps, scale=scale)
    show(render, L, grains)

  render.write_to_png('./linear.png')
Project: comprehend    Author: Fenugreek
def _random_overlay(self, static_hidden=False):
        """Construct random max pool locations."""

        s = self.shapes[2]

        if static_hidden:
            # sizes must be ints: use floor division (true division breaks on Python 3)
            args = np.random.randint(s[2], size=np.prod(s) // s[2] // s[4])
            overlay = np.zeros(np.prod(s) // s[4], bool)
            overlay[args + np.arange(len(args)) * s[2]] = True
            overlay = overlay.reshape([s[0], s[1], s[3], s[2]])
            overlay = np.rollaxis(overlay, -1, 2)
            return arrays.extend(overlay, s[4])
        else:
            args = np.random.randint(s[2], size=np.prod(s) // s[2])
            overlay = np.zeros(np.prod(s), bool)
            overlay[args + np.arange(len(args)) * s[2]] = True
            overlay = overlay.reshape([s[0], s[1], s[3], s[4], s[2]])
            return np.rollaxis(overlay, -1, 2)
Project: iCount    Author: tomazc
def make_fasta_file(sequences=None, headers=None, out_file=None, num_sequences=10, seq_len=80,
                    rnd_seed=None):
    """Make artificial FASTA file."""
    random.seed(rnd_seed)  # pylint:disable=no-member
    if sequences is None and headers is None:
        headers = ['{}'.format(i + 1) for i in range(num_sequences)]
        random_seeds = random.randint(10**5, size=num_sequences)  # pylint:disable=no-member
        sequences = [make_sequence(seq_len, rnd_seed=rnd) for rnd in random_seeds]
    elif sequences is None:
        random_seeds = random.randint(10**5, size=len(headers))  # pylint:disable=no-member
        sequences = [make_sequence(seq_len, rnd_seed=rnd) for rnd in random_seeds]
    elif headers is None:
        headers = ['{}'.format(i + 1) for i in range(len(sequences))]

    if out_file is None:
        out_file = get_temp_file_name(extension='fasta')
    with open(out_file, 'wt') as ofile:
        for header, seq in zip(headers, sequences):
            ofile.write('>' + header + '\n')
            ofile.write(seq + '\n')

    return os.path.abspath(out_file)
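A hypothetical call, assuming the helper names from the snippet are importable:

path = make_fasta_file(num_sequences=10, seq_len=80, rnd_seed=42)
# writes ten random 80-base sequences to a temp file; rnd_seed makes it reproducible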
Project: spark-deep-learning    Author: databricks
def test_identity_module(self):
        """ identity module should preserve input """

        with IsolatedSession() as issn:
            pred_input = tf.placeholder(tf.float32, [None, None])
            final_output = tf.identity(pred_input, name='output')
            gfn = issn.asGraphFunction([pred_input], [final_output])

        for _ in range(10):
            m, n = prng.randint(10, 1000, size=2)
            mat = prng.randn(m, n).astype(np.float32)
            with IsolatedSession() as issn:
                feeds, fetches = issn.importGraphFunction(gfn)
                mat_out = issn.run(fetches[0], {feeds[0]: mat})

            self.assertTrue(np.all(mat_out == mat))
Project: py-graphart    Author: dandydarcy
def _init_edges(self):
        '''
        Initialize random edges filling the FSs struct
        '''
        if self.acyclic:
            self._init_acyclic_edges()
            return
        for i in range(len(self.nodes)):
            # rnd.randint(, self.max_neighbours+1)
            neighbours = self.max_neighbours
            query_res = self.nodes_tree.query(
                self.nodes[i], k=neighbours+1)

            self.FSs[i] = query_res[1][1:]
            for node_index in self.FSs[i]:
                self.BSs[node_index] = np.append(self.BSs[node_index], [i])

            self.FS_costs[i] = query_res[0][1:]
Project: action-detection    Author: yjxiong
def _sample_indices(self, valid_length, num_seg):
        """

        :param record: VideoRecord
        :return: list
        """

        average_duration = (valid_length + 1) // num_seg
        if average_duration > 0:
            # normal cases
            offsets = np.multiply(list(range(num_seg)), average_duration) \
                             + randint(average_duration, size=num_seg)
        elif valid_length > num_seg:
            offsets = np.sort(randint(valid_length, size=num_seg))
        else:
            offsets = np.zeros((num_seg, ))

        return offsets
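For intuition, the "normal cases" branch splits the video into num_seg equal chunks and draws one random offset inside each; a standalone sketch with concrete numbers:

import numpy as np
from numpy.random import randint

valid_length, num_seg = 10, 3
average_duration = (valid_length + 1) // num_seg   # 3
offsets = np.multiply(list(range(num_seg)), average_duration) \
                 + randint(average_duration, size=num_seg)
# e.g. array([1, 4, 8]): one offset in [0, 3), one in [3, 6), one in [6, 9)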
Project: EndemicPy    Author: j-i-l
    def initiate_infection(self, strain):
        """
        Set the initial seed for an infection.

        Arguments:
            - strain: dict, key is the name of a strain, value is a list of
                node ids or 'random'. If the value is 'random', one random
                host is infected.
                E.g. strain = {'wild_type': [1, 5, 10]} infects hosts 1, 5
                    and 10 with the wild-type strain.
        """
        self.t = 0
        for name in strain:
            if name not in self.pathogen.ids.keys():
                raise self.WrongPathogenError("There is no pathogen strain with the name <%s>." % name)
            if type(strain[name]) is not str:
                for node_id in strain[name]:
                    self.current_view[node_id] = self.pathogen.ids[name]
            else:
                self.current_view[nrand.randint(0, self.contact_structure.n)] = self.pathogen.ids[name]
            self._init_queue()
        return 0

    # unused method can be removed (along with self.initiate_infection)
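A hypothetical call matching the docstring (the simulation object name is assumed):

sim.initiate_infection({'wild_type': [1, 5, 10]})  # seed three known hosts
sim.initiate_infection({'mutant': 'random'})       # seed one random host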
Project: PyGraphArt    Author: dnlcrl
def _init_edges(self):
        '''
        Initialize random edges filling the FSs struct
        '''
        if self.acyclic:
            self._init_acyclic_edges()
            return
        for i in range(len(self.nodes)):
            # rnd.randint(, self.max_neighbours+1)
            neighbours = self.max_neighbours
            query_res = self.nodes_tree.query(
                self.nodes[i], k=neighbours+1)

            self.FSs[i] = query_res[1][1:]
            for node_index in self.FSs[i]:
                self.BSs[node_index] = np.append(self.BSs[node_index], [i])

            self.FS_costs[i] = query_res[0][1:]
Project: e2c-pytorch    Author: ethanluoyc
def sample(self, batch_size):
        """
    computes (x_t,u_t,x_{t+1}) pair
    returns tuple of 3 ndarrays with shape
    (batch,x_dim), (batch, u_dim), (batch, x_dim)
    """
        if not self.initialized:
            raise ValueError(
                "Dataset not loaded - call PlaneData.initialize() first.")
        traj = randint(0, num_t, size=batch_size)  # which trajectory
        tt = randint(0, T - 1, size=batch_size)  # time step t for each batch
        X0 = np.zeros((batch_size, x_dim))
        U0 = np.zeros((batch_size, u_dim), dtype=np.int)
        X1 = np.zeros((batch_size, x_dim))
        for i in range(batch_size):
            t = tt[i]
            p = self.P[traj[i], t, :]
            X0[i, :] = self.getX(traj[i], t)
            X1[i, :] = self.getX(traj[i], t + 1)
            U0[i, :] = self.U[traj[i], t, :]
        return (X0, U0, X1)
Project: Neural-Chatbot    Author: saurabhmathur96
def next_batch(self):
        inverse_vocabulary = self.inverse_vocabulary
        if self.stream:
            q = [[inverse_vocabulary[word] for word in next(self.questions).strip().split() ] for i in range(self.batch_size)]
            a = [[inverse_vocabulary[word] for word in next(self.answers).strip().split() ] for i in range(self.batch_size)]
        else:
            n_example = len(self.answers)
            indices = random.randint(0, n_example, size=(self.batch_size))
            q = [[inverse_vocabulary[word] for word in self.questions[i].split()] for i in indices]
            a = [[inverse_vocabulary[word] for word in self.answers[i].split()] for i in indices]

        X = pad_sequences(q, maxlen=self.sequence_length)
        y = pad_sequences(a, maxlen=self.sequence_length)

        if self.one_hot_target:
            return (X, self.to_one_hot(y))
        else:
            return (X, y)
Project: drmad    Author: bigaidream-projects
def test_long_sequence():
    N_iters = 200
    vect_length = 10
    Ns = []
    Ms = []
    for i in range(N_iters):
        Ms.append(npr.randint(200) + 1)
        Ns.append(npr.randint(Ms[-1], size=vect_length))
    store = BitStore(vect_length)
    coinflips = npr.rand(N_iters)
    new_Ns = []
    for N, M, r in zip(Ns, Ms, coinflips):
        if r < 0.75:
            store.push(N, M)
        else:
            new_Ns.append(store.pop(M))

    for N, M, r in list(zip(Ns, Ms, coinflips))[::-1]:  # zip objects are not sliceable in Python 3
        if r < 0.75:
            cur_N = store.pop(M)
            assert np.all(cur_N == N)
        else:
            store.push(new_Ns.pop(), M)
Project: keras-surgeon    Author: BenWhetton
def layer_test_helper_1d_global(layer, channel_index):
    # This should test that the output has the correct shape, so it should
    # pass into a Dense layer rather than a Conv layer.
    # The weighted layer is the previous layer.
    # Create model
    main_input = Input(shape=list(random.randint(10, 20, size=2)))
    x = Conv1D(3, 3)(main_input)
    x = layer(x)
    main_output = Dense(5)(x)
    model = Model(inputs=main_input, outputs=main_output)

    # Delete channels
    del_layer_index = 1
    next_layer_index = 3
    del_layer = model.layers[del_layer_index]
    new_model = operations.delete_channels(model, del_layer, channel_index)
    new_w = new_model.layers[next_layer_index].get_weights()

    # Calculate next layer's correct weights
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    correct_w = model.layers[next_layer_index].get_weights()
    correct_w[0] = np.delete(correct_w[0], channel_index, axis=0)

    assert weights_equal(correct_w, new_w)
Project: keras-surgeon    Author: BenWhetton
def layer_test_helper_2d_global(layer, channel_index, data_format):
    # This should test that the output has the correct shape, so it should
    # pass into a Dense layer rather than a Conv layer.
    # The weighted layer is the previous layer.
    # Create model
    main_input = Input(shape=list(random.randint(10, 20, size=3)))
    x = Conv2D(3, [3, 3], data_format=data_format)(main_input)
    x = layer(x)
    main_output = Dense(5)(x)
    model = Model(inputs=main_input, outputs=main_output)

    # Delete channels
    del_layer_index = 1
    next_layer_index = 3
    del_layer = model.layers[del_layer_index]
    new_model = operations.delete_channels(model, del_layer, channel_index)
    new_w = new_model.layers[next_layer_index].get_weights()

    # Calculate next layer's correct weights
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    correct_w = model.layers[next_layer_index].get_weights()
    correct_w[0] = np.delete(correct_w[0], channel_index, axis=0)

    assert weights_equal(correct_w, new_w)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_frame_negate(self):
        expr = self.ex('-')

        # float
        lhs = DataFrame(randn(5, 2))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_frame_equal(expect, result)

        # int
        lhs = DataFrame(randint(5, size=(5, 2)))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_frame_equal(expect, result)

        # bool doesn't work with numexpr but works elsewhere
        lhs = DataFrame(rand(5, 2) > 0.5)
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = -lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_frame_equal(expect, result)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_series_negate(self):
        expr = self.ex('-')

        # float
        lhs = Series(randn(5))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_series_equal(expect, result)

        # int
        lhs = Series(randint(5, size=5))
        expect = -lhs
        result = pd.eval(expr, engine=self.engine, parser=self.parser)
        assert_series_equal(expect, result)

        # bool doesn't work with numexpr but works elsewhere
        lhs = Series(rand(5) > 0.5)
        if self.engine == 'numexpr':
            with tm.assertRaises(NotImplementedError):
                result = pd.eval(expr, engine=self.engine, parser=self.parser)
        else:
            expect = -lhs
            result = pd.eval(expr, engine=self.engine, parser=self.parser)
            assert_series_equal(expect, result)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_grouper_multilevel_freq(self):

        # GH 7885
        # with level and freq specified in a pd.Grouper
        from datetime import date, timedelta
        d0 = date.today() - timedelta(days=14)
        dates = date_range(d0, date.today())
        date_index = pd.MultiIndex.from_product(
            [dates, dates], names=['foo', 'bar'])
        df = pd.DataFrame(np.random.randint(0, 100, 225), index=date_index)

        # Check string level
        expected = df.reset_index().groupby([pd.Grouper(
            key='foo', freq='W'), pd.Grouper(key='bar', freq='W')]).sum()
        # reset index changes columns dtype to object
        expected.columns = pd.Index([0], dtype='int64')

        result = df.groupby([pd.Grouper(level='foo', freq='W'), pd.Grouper(
            level='bar', freq='W')]).sum()
        assert_frame_equal(result, expected)

        # Check integer level
        result = df.groupby([pd.Grouper(level=0, freq='W'), pd.Grouper(
            level=1, freq='W')]).sum()
        assert_frame_equal(result, expected)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_apply_frame_concat_series(self):
        def trans(group):
            return group.groupby('B')['C'].sum().sort_values()[:2]

        def trans2(group):
            grouped = group.groupby(df.reindex(group.index)['B'])
            return grouped.sum().sort_values()[:2]

        df = DataFrame({'A': np.random.randint(0, 5, 1000),
                        'B': np.random.randint(0, 5, 1000),
                        'C': np.random.randn(1000)})

        result = df.groupby('A').apply(trans)
        exp = df.groupby('A')['C'].apply(trans2)
        assert_series_equal(result, exp, check_names=False)
        self.assertEqual(result.name, 'C')
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_apply_corner_cases(self):
        # #535, can't use sliding iterator

        N = 1000
        labels = np.random.randint(0, 100, size=N)
        df = DataFrame({'key': labels,
                        'value1': np.random.randn(N),
                        'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})

        grouped = df.groupby('key')

        def f(g):
            g['value3'] = g['value1'] * 2
            return g

        result = grouped.apply(f)
        self.assertTrue('value3' in result)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_fast_apply(self):
        # make sure that fast apply is correctly called
        # rather than raising any kind of error
        # otherwise the python path will be called
        # which slows things down
        N = 1000
        labels = np.random.randint(0, 2000, size=N)
        labels2 = np.random.randint(0, 3, size=N)
        df = DataFrame({'key': labels,
                        'key2': labels2,
                        'value1': np.random.randn(N),
                        'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})

        def f(g):
            return 1

        g = df.groupby(['key', 'key2'])

        grouper = g.grouper

        splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
        group_keys = grouper._get_group_keys()

        values, mutated = splitter.fast_apply(f, group_keys)
        self.assertFalse(mutated)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test_groupby_categorical_index(self):

        levels = ['foo', 'bar', 'baz', 'qux']
        codes = np.random.randint(0, 4, size=20)
        cats = Categorical.from_codes(codes, levels, ordered=True)
        df = DataFrame(
            np.repeat(
                np.arange(20), 4).reshape(-1, 4), columns=list('abcd'))
        df['cats'] = cats

        # with a cat index
        result = df.set_index('cats').groupby(level=0).sum()
        expected = df[list('abcd')].groupby(cats.codes).sum()
        expected.index = CategoricalIndex(
            Categorical.from_codes(
                [0, 1, 2, 3], levels, ordered=True), name='cats')
        assert_frame_equal(result, expected)

        # with a cat column, should produce a cat index
        result = df.groupby('cats').sum()
        expected = df[list('abcd')].groupby(cats.codes).sum()
        expected.index = CategoricalIndex(
            Categorical.from_codes(
                [0, 1, 2, 3], levels, ordered=True), name='cats')
        assert_frame_equal(result, expected)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia
def test__cython_agg_general(self):
        ops = [('mean', np.mean),
               ('median', np.median),
               ('var', np.var),
               ('add', np.sum),
               ('prod', np.prod),
               ('min', np.min),
               ('max', np.max),
               ('first', lambda x: x.iloc[0]),
               ('last', lambda x: x.iloc[-1]), ]
        df = DataFrame(np.random.randn(1000))
        labels = np.random.randint(0, 50, size=1000).astype(float)

        for op, targop in ops:
            result = df.groupby(labels)._cython_agg_general(op)
            expected = df.groupby(labels).agg(targop)
            try:
                tm.assert_frame_equal(result, expected)
            except BaseException as exc:
                exc.args += ('operation: %s' % op, )
                raise
Project: pyshgp    Author: erp12
def simplify_once(genome):
    """Silences or noops between 1 and 3 random genes.

    Parameters
    ----------
    genome : list of Genes
        List of Plush genes.
    """
    gn = deepcopy(genome)
    n = randint(1, 4)  # between 1 and 3; the upper bound is exclusive
    action = choice(['silent', 'noop'])
    if action == 'silent':
        silent_n_random_genes(gn, n)
    else:
        noop_n_random_genes(gn, n)
    return gn
Project: rca-evaluation    Author: sieve-microservices
def _kshape(x, k, initial_clustering=None):
    """
    >>> from numpy.random import seed; seed(0)
    >>> _kshape(np.array([[1,2,3,4], [0,1,2,3], [-1,1,-1,1], [1,2,2,3]]), 2)
    (array([0, 0, 1, 0]), array([[-1.2244258 , -0.35015476,  0.52411628,  1.05046429],
           [-0.8660254 ,  0.8660254 , -0.8660254 ,  0.8660254 ]]))
    """
    m = x.shape[0]

    if initial_clustering is not None:
        assert len(initial_clustering) == m, "Initial assignment does not match the number of series"
        idx = initial_clustering
    else:
        idx = randint(0, k, size=m)

    print(idx)

    centroids = np.zeros((k,x.shape[1]))
    distances = np.empty((m, k))

    for _ in range(100):
        old_idx = idx
        for j in range(k):
            centroids[j] = _extract_shape(idx, x, j, centroids[j])

        for i in range(m):
            for j in range(k):
                distances[i, j] = 1 - max(_ncc_c(x[i], centroids[j]))
        idx = distances.argmin(1)
        if np.array_equal(old_idx, idx):
            break

    print(idx)

    return idx, centroids
Project: HandDetection    Author: YunqiuXu
def get_minibatch(roidb, num_classes):
  """Given a roidb, construct a minibatch sampled from it."""
  num_images = len(roidb)
  # Sample random scales to use for each image in this batch
  random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                  size=num_images)
  assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
    'num_images ({}) must divide BATCH_SIZE ({})'. \
    format(num_images, cfg.TRAIN.BATCH_SIZE)

  # Get the input image blob, formatted for caffe
  im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

  blobs = {'data': im_blob}

  assert len(im_scales) == 1, "Single batch only"
  assert len(roidb) == 1, "Single batch only"

  # gt boxes: (x1, y1, x2, y2, cls)
  if cfg.TRAIN.USE_ALL_GT:
    # Include all ground truth boxes
    gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
  else:
    # For the COCO ground truth boxes, exclude the ones that are ''iscrowd''.
    # Parentheses are required here: & binds tighter than !=.
    gt_inds = np.where((roidb[0]['gt_classes'] != 0) & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
  gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
  gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
  gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
  blobs['gt_boxes'] = gt_boxes
  blobs['im_info'] = np.array(
    [im_blob.shape[1], im_blob.shape[2], im_scales[0]],
    dtype=np.float32)

  return blobs
Project: automata    Author: inconvergent
def get_initial(num=10, shift=2):
  from numpy import zeros
  from numpy.random import randint
  init = zeros((GRID_SIZE, GRID_SIZE), 'bool')

  mid = int(GRID_SIZE/2)

  init[mid-shift:mid+shift,mid-shift:mid+shift] = True
  xx = randint(mid-shift,mid+shift, size=(num))
  yy = randint(mid-shift,mid+shift, size=(num))
  init[xx,yy] = False

  return init
Project: comprehend    Author: Fenugreek
def corrupt(dataset, corruption):
    """
    Return a corrupted copy of the input dataset.

    corruption: (between 0.0 and 1.0)
    Fraction of dataset randomly set to mean value of the dataset.
    """

    corrupted = dataset.flatten()
    size = corrupted.size

    corrupted[randint(size,
                      size=int(corruption * size))] = np.mean(corrupted)

    return corrupted.reshape(dataset.shape)
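A hypothetical call, assuming a float NumPy array as input:

import numpy as np
data = np.arange(100, dtype=float).reshape(10, 10)
noisy = corrupt(data, 0.25)  # about a quarter of the entries become the mean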
Project: comprehend    Author: Fenugreek
def block_corrupt(dataX, corruption_level=.1):
    """
    Return a copy of dataX MNIST images after corrupting each row with
    a rectangle of size corruption_level.
    """

    count = len(dataX)
    size = dataX[0].size
    length = int(np.sqrt(size))
    corrupt_area = corruption_level * size

    breadths = randint(1, int(np.sqrt(corrupt_area)), count)
    lengths = (corrupt_area / breadths).astype(int)
    switch = randint(0, 2, count)
    breadths[switch==0] = lengths[switch==0]
    lengths = (corrupt_area / breadths).astype(int)

    loc_x = randint(0, length, count)
    loc_y = randint(0, length, count)

    corruptX = np.zeros(dataX.shape, dtype=dataX.dtype)
    for i, img in enumerate(dataX):
        bi, li = breadths[i], lengths[i]
        ind_x = np.arange(loc_x[i], loc_x[i] + bi, dtype=int) % length
        ind_y = np.arange(loc_y[i], loc_y[i] + li, dtype=int) % length
        corrupted = img.copy().reshape((length, length))
        corrupted[(np.tile(ind_x, li),
                   np.repeat(ind_y, bi))] = random(bi * li)
#                                         = np.zeros(bi * li)
        corruptX[i] = corrupted.reshape(img.shape)

    return corruptX
Project: dpl    Author: ppengtang
def get_minibatch(roidb, num_classes):
    """Given a roidb, construct a minibatch sampled from it."""
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                                    size=num_images)

    # Get the input image blob, formatted for caffe
    im_blob, im_scales, im_shapes = _get_image_blob(roidb, random_scale_inds)

    # Now, build the region of interest and label blobs
    rois_blob = np.zeros((0, 5), dtype=np.float32)
    labels_blob = np.zeros((0, 20), dtype=np.float32)
    # bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
    # bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
    # all_overlaps = []
    for im_i in range(num_images):  # xrange in the original Python 2 code
        labels, im_rois = _sample_rois(roidb[im_i], num_classes)

        # Add to RoIs blob
        rois = _project_im_rois(im_rois, im_scales[im_i])
        batch_ind = im_i * np.ones((rois.shape[0], 1))
        rois_blob_this_image = np.hstack((batch_ind, rois))
        rois_blob = np.vstack((rois_blob, rois_blob_this_image))

        # Add to labels, bbox targets, and bbox loss blobs
        labels_blob = np.vstack((labels_blob, labels))
        # all_overlaps = np.hstack((all_overlaps, overlaps))

    # For debug visualizations
    # _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
    blobs = {'data': im_blob,
             'rois': rois_blob,
             'labels': labels_blob,
             'shapes': im_shapes}

    return blobs
Project: speechless    Author: JuliusKunze
def _default_asg_transition_probabilities(grapheme_set_size: int) -> ndarray:
        asg_transition_probabilities = random.randint(1, 15,
                                                      (grapheme_set_size + 1, grapheme_set_size + 1))
        zero_array = zeros(grapheme_set_size + 1)
        asg_transition_probabilities[0] = zero_array
        asg_transition_probabilities[:, 0] = zero_array
        # sum up each column, add dummy 1 in front for easier division later
        transition_norms = concatenate(([1], asg_transition_probabilities[:, 1:].sum(axis=0)))
        asg_transition_probabilities = asg_transition_probabilities / transition_norms
        return asg_transition_probabilities
Project: speechless    Author: JuliusKunze
def _default_asg_initial_probabilities(grapheme_set_size: int) -> ndarray:
        asg_initial_probabilities = random.randint(1, 15, grapheme_set_size + 1)
        asg_initial_probabilities[0] = 0
        asg_initial_probabilities = asg_initial_probabilities / asg_initial_probabilities.sum()
        # N.B. beware that initial_logprobs[0] is now -inf, NOT 0!
        return asg_initial_probabilities
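Both helpers turn randint pseudo-counts into normalized probabilities; a quick standalone check of that idea (independent of the project):

import numpy as np
from numpy import random

p = random.randint(1, 15, 5).astype(float)  # positive pseudo-counts
p[0] = 0                                    # reserved entry, as in the snippet
p = p / p.sum()
assert np.isclose(p.sum(), 1.0)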
Project: CopyNet    Author: MultiPath
def build_instance():
    instance = dict(x=[], y=[], source=[], target=[], target_c=[], rule_id=[], rule=[])
    for k in range(num):  # xrange in the original Python 2 code
        source = rules['source'][k]
        target = rules['target'][k]

        for j in range(repeats):
            X  = n_rng.randint(1000, size=n_rng.randint(maxleg) + 1)
            Y  = n_rng.randint(1000, size=n_rng.randint(maxleg) + 1)
            S  = []
            T  = []
            for w in source:
                if w == 'X':  # 'is' compares identity, not string equality
                    S += [ftr(v) for v in X]
                elif w == 'Y':
                    S += [ftr(v) for v in Y]
                else:
                    S += [w]

            for w in target:
                if w == 'X':
                    T += [ftr(v) for v in X]
                elif w == 'Y':
                    T += [ftr(v) for v in Y]
                else:
                    T += [w]

            A  = [word2idx[w] for w in S]
            B  = [word2idx[w] for w in T]
            C  = [0 if w not in S else S.index(w) + Lmax for w in T]

            instance['x']        += [S]
            instance['y']        += [T]
            instance['source']   += [A]
            instance['target']   += [B]
            instance['target_c'] += [C]

            instance['rule_id']  += [k]
            instance['rule']     += [' '.join(source) + ' -> ' + ' '.join(target)]

    return instance
Project: CopyNet    Author: MultiPath
def repeat_name(l):
    ll = []
    for word in l:
        if word2idx[word] in persons:
            k = n_rng.randint(5) + 1
            ll += [idx2word[persons[i]] for i in n_rng.randint(len(persons), size=k).tolist()]
        elif word2idx[word] in colors:
            k = n_rng.randint(5) + 1
            ll += [idx2word[colors[i]] for i in n_rng.randint(len(colors), size=k).tolist()]
        elif word2idx[word] in shapes:
            k = n_rng.randint(5) + 1
            ll += [idx2word[shapes[i]] for i in n_rng.randint(len(shapes), size=k).tolist()]
        else:
            ll += [word]
    return ll
Project: ssd.pytorch    Author: amdegroot
def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):  # numpy randint(2) returns 0 or 1: a fair coin flip
            image[:, :, 1] *= random.uniform(self.lower, self.upper)

        return image, boxes, labels
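Every transform in this project gates itself on the same coin flip; a standalone sketch of the idiom (maybe_apply is an illustrative name, not part of the project):

import numpy.random as random

def maybe_apply(transform, image):
    # randint(2) draws from {0, 1}, so the transform fires half the time
    if random.randint(2):
        return transform(image)
    return image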
Project: ssd.pytorch    Author: amdegroot
def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            image[:, :, 0] += random.uniform(-self.delta, self.delta)
            image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
            image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
        return image, boxes, labels
Project: ssd.pytorch    Author: amdegroot
def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            swap = self.perms[random.randint(len(self.perms))]
            shuffle = SwapChannels(swap)  # shuffle channels
            image = shuffle(image)
        return image, boxes, labels
Project: ssd.pytorch    Author: amdegroot
def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            alpha = random.uniform(self.lower, self.upper)
            image *= alpha
        return image, boxes, labels
Project: ssd.pytorch    Author: amdegroot
def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            delta = random.uniform(-self.delta, self.delta)
            image += delta
        return image, boxes, labels
Project: ssd.pytorch    Author: amdegroot
def __call__(self, image, boxes, classes):
        _, width, _ = image.shape
        if random.randint(2):
            image = image[:, ::-1]
            boxes = boxes.copy()
            boxes[:, 0::2] = width - boxes[:, 2::-2]
        return image, boxes, classes
Project: ssd.pytorch    Author: amdegroot
def __call__(self, image, boxes, labels):
        im = image.copy()
        im, boxes, labels = self.rand_brightness(im, boxes, labels)
        if random.randint(2):
            distort = Compose(self.pd[:-1])
        else:
            distort = Compose(self.pd[1:])
        im, boxes, labels = distort(im, boxes, labels)
        return self.rand_light_noise(im, boxes, labels)
Project: trendpy    Author: RonsenbergVI
def setUp(self):
        self.order = int(randint(low=0,high=4,size=1))
        self.dim = int(randint(low=self.order+2,high=2000,size=1))
        self.D = trendpy.globals.derivative_matrix(self.dim,self.order)
Project: latplan    Author: guicho271828
def random_panels():
    load()
    return [ digits[randint(0,len(digits))].reshape((28,28)) for digits in imgs ]
Project: latplan    Author: guicho271828
def select(data,num):
    return data[random.randint(0,data.shape[0],num)]
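This one-liner draws num row indices with replacement and gathers those rows; an equivalent spelled-out version:

import numpy as np
from numpy import random

data = np.arange(12).reshape(6, 2)
rows = random.randint(0, data.shape[0], 3)  # three row indices, repeats allowed
sample = data[rows]                         # shape (3, 2)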
Project: latplan    Author: guicho271828
def select(data,num):
    return data[random.randint(0,data.shape[0],num)]
Project: latplan    Author: guicho271828
def select(data,num):
    return data[random.randint(0,data.shape[0],num)]
Project: latplan    Author: guicho271828
def _select(list):
    # stdlib random here, not numpy: random.randint's upper bound is inclusive
    import random
    return list[random.randint(0, len(list) - 1)]
Project: latplan    Author: guicho271828
def select(data,num):
    return data[random.randint(0,data.shape[0],num)]
Project: latplan    Author: guicho271828
def dump_autoencoding_image(ae,test,train):
    if 'plot' not in mode:
        return
    rz = np.random.randint(0,2,(6,ae.parameters['N']))
    ae.plot_autodecode(rz,"autodecoding_random.png",verbose=True)
    ae.plot(select(test,6),"autoencoding_test.png",verbose=True)
    ae.plot(select(train,6),"autoencoding_train.png",verbose=True)