Python numpy module: full() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.full().
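
A minimal usage sketch first: np.full(shape, fill_value) allocates an array of the given shape with every element set to fill_value, optionally with an explicit dtype.

import numpy as np

a = np.full(3, 7)                      # array([7, 7, 7])
b = np.full((2, 2), np.nan)            # 2x2 float64 array of NaN
c = np.full((2, 3), True, dtype=bool)  # boolean mask, all True
print(a, b, c, sep="\n")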

Project: human-rl    Author: gsastry    | project source | file source
def label_and_build_mask(self, episode):
        is_catastrophe_array = np.array(
            [is_catastrophe(frame.image) for frame in episode.frames if frame.action is not None])
        # should_block_array = np.array([should_block(frame.image, frame.action) for frame in episode.frames])

        labels = np.full(len(episode.frames), fill_value=False, dtype=bool)  # np.bool was removed in modern NumPy
        mask = np.full(len(episode.frames), fill_value=True, dtype=bool)

        for i in range(len(episode.frames)):
            if i + self.block_radius + 1 >= len(episode.frames):
                mask[i] = False
                continue
            if is_catastrophe_array[i]:
                mask[i] = False
                continue
            for j in range(self.block_radius + 1):
                if is_catastrophe_array[i + j + 1]:
                    labels[i] = True
                    break
        return labels, mask
Project: seq2seq    Author: google    | project source | file source
def setUp(self):
    super(BridgeTest, self).setUp()
    self.batch_size = 4
    self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
    self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
    final_encoder_state = nest.map_structure(
        lambda x: tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, x),
            dtype=tf.float32),
        self.encoder_cell.state_size)
    self.encoder_outputs = EncoderOutput(
        outputs=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values_length=np.full([self.batch_size], 10),
        final_state=final_encoder_state)
Project: bnn-analysis    Author: myshkov    | project source | file source
def _normalise_data(self):
        self.train_x_mean = np.zeros(self.input_dim)
        self.train_x_std = np.ones(self.input_dim)

        self.train_y_mean = np.zeros(self.output_dim)
        self.train_y_std = np.ones(self.output_dim)

        if self.normalise_data:
            self.train_x_mean = np.mean(self.train_x, axis=0)
            self.train_x_std = np.std(self.train_x, axis=0)
            self.train_x_std[self.train_x_std == 0] = 1.

            self.train_x = (self.train_x - np.full(self.train_x.shape, self.train_x_mean, dtype=np.float32)) / \
                           np.full(self.train_x.shape, self.train_x_std, dtype=np.float32)

            self.test_x = (self.test_x - np.full(self.test_x.shape, self.train_x_mean, dtype=np.float32)) / \
                          np.full(self.test_x.shape, self.train_x_std, dtype=np.float32)

            self.train_y_mean = np.mean(self.train_y, axis=0)
            self.train_y_std = np.std(self.train_y, axis=0)

            self.train_y_std[self.train_y_std == 0] = 1.  # element-wise guard, as for train_x; `if array == 0` would raise for vectors

            self.train_y = (self.train_y - self.train_y_mean) / self.train_y_std
Project: tsbitmaps    Author: binhmop    | project source | file source
def fit_predict(self, ts):
        """
        Unsupervised training of TSBitMaps.

        :param ts: 1-D numpy array or pandas.Series 
        :return labels: `+1` for normal observations and `-1` for abnormal observations
        """
        assert self._lag_window_size > self._feature_window_size, 'lag_window_size must be greater than feature_window_size'

        self._ref_ts = ts
        scores = self._slide_chunks(ts)
        self._ref_bitmap_scores = scores

        thres = np.percentile(scores[self._lag_window_size: -self._lead_window_size + 1], self._q)

        labels = np.full(len(ts), 1)
        for idx, score in enumerate(scores):
            if score > thres:
                labels[idx] = -1

        return labels
Project: tsbitmaps    Author: binhmop    | project source | file source
def create_bitmap_grid(bitmap, n, num_bins, level_size):
    """
    Arranges a time-series bitmap into a 2-D grid for heatmap visualization
    """
    assert num_bins % n == 0, 'num_bins has to be a multiple of n'
    m = num_bins // n

    row_count = int(math.pow(m, level_size))
    col_count = int(math.pow(n, level_size))

    grid = np.full((row_count, col_count), 0.0)

    for feat, count in bitmap.items():
        i, j = symbols2index(m, n, feat)
        grid[i, j] = count
    return grid
Project: corporadb    Author: nlesc-sherlock    | project source | file source
def get_data(setname):
    dataset = CorporaDataSet(setname)
#    topic_word_array = dataset.getWordsInTopicMatrix()
#    topic_doc_array = dataset.getDocsInTopicMatrix()
    topic_word_array = dataset.getDocsInTopicMatrix()
    topic_doc_array = dataset.getWordsInTopicMatrix().T
    doc_length_array = numpy.full([topic_doc_array.shape[0]],1)
    vocabulary = dataset.loadVocabulary()[0].keys()
    print "topic word array shape: ",topic_word_array.shape
    print "topic doc shape: ",topic_doc_array.shape
    print "vocabulary: ",len(vocabulary)
    wordfreqs = mmread(setname + ".mtx").sum(1)
    word_freq_array = numpy.array(wordfreqs)[:,0]

    return {topic_word_key:topic_word_array,
            topic_doc_key:topic_doc_array,
            doc_length_key:doc_length_array,
            vocabulary_key:vocabulary,
            word_freq_key:word_freq_array}
Project: sound_field_analysis-py    Author: QULab    | project source | file source
def sphankel1(n, kr):
    """Spherical Hankel (first kind) of order n at kr

    Parameters
    ----------
    n : array_like
       Order
    kr: array_like
       Argument

    Returns
    -------
    hn1 : complex float
       Spherical Hankel function hn (first kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    hn1 = _np.full(n.shape, _np.nan, dtype=complex)  # np.complex_ was removed in NumPy 2.0
    kr_nonzero = kr != 0
    hn1[kr_nonzero] = _np.sqrt(_np.pi / 2) / _np.lib.scimath.sqrt(kr[kr_nonzero]) * hankel1(n[kr_nonzero] + 0.5, kr[kr_nonzero])
    return hn1
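
For reference, the implementation above relies on the identity h_n(x) = sqrt(pi/(2x)) * H_{n+1/2}(x) relating spherical to cylindrical Hankel functions. A quick numerical check of the n = 0 case, which has the closed form -i*exp(ix)/x (assumes SciPy is installed):

import numpy as np
from scipy.special import hankel1

x = 2.0
closed_form = -1j * np.exp(1j * x) / x                     # h_0^(1)(x)
via_identity = np.sqrt(np.pi / (2 * x)) * hankel1(0.5, x)  # formula used above
print(np.allclose(closed_form, via_identity))              # True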
Project: sound_field_analysis-py    Author: QULab    | project source | file source
def sphankel2(n, kr):
    """Spherical Hankel (second kind) of order n at kr

    Parameters
    ----------
    n : array_like
       Order
    kr: array_like
       Argument

    Returns
    -------
    hn2 : complex float
       Spherical Hankel function hn (second kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    hn2 = _np.full(n.shape, _np.nan, dtype=complex)
    kr_nonzero = kr != 0
    hn2[kr_nonzero] = _np.sqrt(_np.pi / 2) / _np.lib.scimath.sqrt(kr[kr_nonzero]) * hankel2(n[kr_nonzero] + 0.5, kr[kr_nonzero])
    return hn2
Project: sound_field_analysis-py    Author: QULab    | project source | file source
def dsphankel1(n, kr):
    """Derivative spherical Hankel (first kind) of order n at kr

    Parameters
    ----------
    n : array_like
       Order
    kr: array_like
       Argument

    Returns
    -------
    dhn1 : complex float
       Derivative of spherical Hankel function hn' (first kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    dhn1 = _np.full(n.shape, _np.nan, dtype=complex)
    kr_nonzero = kr != 0
    dhn1[kr_nonzero] = 0.5 * (sphankel1(n[kr_nonzero] - 1, kr[kr_nonzero]) - sphankel1(n[kr_nonzero] + 1, kr[kr_nonzero]) - sphankel1(n[kr_nonzero], kr[kr_nonzero]) / kr[kr_nonzero])
    return dhn1
Project: sound_field_analysis-py    Author: QULab    | project source | file source
def dsphankel2(n, kr):
    """Derivative spherical Hankel (second kind) of order n at kr

    Parameters
    ----------
    n : array_like
       Order
    kr: array_like
       Argument

    Returns
    -------
    dhn2 : complex float
       Derivative of spherical Hankel function hn' (second kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    dhn2 = _np.full(n.shape, _np.nan, dtype=complex)
    kr_nonzero = kr != 0
    dhn2[kr_nonzero] = 0.5 * (sphankel2(n[kr_nonzero] - 1, kr[kr_nonzero]) - sphankel2(n[kr_nonzero] + 1, kr[kr_nonzero]) - sphankel2(n[kr_nonzero], kr[kr_nonzero]) / kr[kr_nonzero])
    return dhn2
Project: circletracking    Author: caspervdw    | project source | file source
def test_find_multiple_noisy(self):
        """ Test finding multiple particles (noisy) """
        self.atol = 5
        radius = np.random.random() * 15 + 15
        generated_image = self.generate_image(radius, 10, noise=0.2)
        actual_number = len(generated_image.coords)
        fits = find_disks(generated_image.image, (radius / 2.0,
                                                  radius * 2.0),
                          maximum=actual_number)

        _, coords = sort_positions(generated_image.coords,
                                   np.array([fits['y'].values,
                                             fits['x'].values]).T)

        if len(fits) == 0:  # Nothing found
            actual = np.repeat([[np.nan, np.nan, np.nan]], actual_number,
                                axis=0)
        else:
            actual = fits[['r', 'y', 'x']].values.astype(np.float64)

        expected = np.array([np.full(actual_number, radius, np.float64),
                             coords[:, 0], coords[:, 1]]).T

        return np.sqrt(((actual - expected)**2).mean(0)), [0] * 3
Project: mixedvines    Author: asnelt    | project source | file source
def test_fit(self):
        '''
        Tests the fit to samples.
        '''
        # Generate random variates
        size = 100
        samples = self.vine.rvs(size)
        # Fit mixed vine to samples
        is_continuous = np.full((self.dim), True, dtype=bool)
        is_continuous[1] = False
        vine_est = MixedVine.fit(samples, is_continuous)
        assert_approx_equal(vine_est.root.copulas[0].theta, 0.77490,
                            significant=5)
        assert_approx_equal(vine_est.root.input_layer.copulas[0].theta,
                            4.01646, significant=5)
        assert_approx_equal(vine_est.root.input_layer.copulas[1].theta,
                            4.56877, significant=5)
Project: mixedvines    Author: asnelt    | project source | file source
def _logcdf(self, samples):
        lower = np.full(2, -np.inf)
        upper = norm.ppf(samples)
        limit_flags = np.zeros(2)
        if upper.shape[0] > 0:

            def func1d(upper1d):
                '''
                Calculates the multivariate normal cumulative distribution
                function of a single sample.
                '''
                return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]

            vals = np.apply_along_axis(func1d, -1, upper)
        else:
            vals = np.empty((0, ))
        old_settings = np.seterr(divide='ignore')
        vals = np.log(vals)
        np.seterr(**old_settings)
        vals[np.any(samples == 0.0, axis=1)] = -np.inf
        vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
        vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
        return vals
Project: geopyspark    Author: locationtech-labs    | project source | file source
def test_aggregate_variance(self):
        result = self.raster_rdd.aggregate_by_cell(Operation.VARIANCE)

        band = np.array([[
            [1,   1.5, 2,   2.5, 3],
            [1.5, 2,   2.5, 3,   3.5],
            [2,   2.5, 3,   3.5, 4],
            [2.5, 3,   3.5, 4,   4.5],
            [3,   3.5, 4,   4.5, 5]]])

        expected = np.array([
            ((self.first - band) ** 2) + ((self.second - band) ** 2),
            ((self.first - band) ** 2) + ((self.second - band) ** 2)
        ])
        expected_2 = np.full((5, 5), -1.0)

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        self.assertTrue((result.lookup(0, 0)[0].cells == expected_2).all())
Project: geopyspark    Author: locationtech-labs    | project source | file source
def test_aggregate_std(self):
        result = self.raster_rdd.aggregate_by_cell(Operation.STANDARD_DEVIATION)

        band = np.array([[
            [1,   1.5, 2,   2.5, 3],
            [1.5, 2,   2.5, 3,   3.5],
            [2,   2.5, 3,   3.5, 4],
            [2.5, 3,   3.5, 4,   4.5],
            [3,   3.5, 4,   4.5, 5]]])

        expected = np.array([
            (((self.first - band) ** 2) + ((self.second - band) ** 2)) ** (1/2),
            (((self.first - band) ** 2) + ((self.second - band) ** 2)) ** (1/2)
        ])
        expected_2 = np.full((5, 5), -1.0)

        self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
        self.assertTrue((result.lookup(0, 0)[0].cells == expected_2).all())
Project: Supply-demand-forecasting    Author: LevinJ    | project source | file source
def show_weather_bydate(self):
        self.weathdf['gap'] = self.weathdf['time_slotid'].apply(self.find_gap_by_timeslot)
        by_date = self.weathdf.groupby('time_date')
        size = len(by_date)
        col_len = row_len = math.ceil(math.sqrt(size))
        count = 1
        for name, group in by_date:
            ax=plt.subplot(row_len, col_len, count)
#             temp = np.empty(group['time_id'].shape[0])
#             temp.fill(2)

#             ax.plot(group['time_id'], group['gap']/group['gap'].max(), 'r', alpha=0.75)
#             ax.plot(group['time_id'], group['weather']/group['weather'].max())
            ax.bar(group['time_id'], group['weather'], width=1)
            ax.set_title(name)
            count = count + 1
#             plt.bar(group['time_id'], np.full(group['time_id'].shape[0], 5), width=1)

        plt.show()
        return
Project: enet-keras    Author: PavlosMelissinos    | project source | file source
def _retrieve_sample(self, annotation):
        epsilon = 0.05
        high_val = 1 - epsilon
        low_val = 0 + epsilon
        coco_image = self._coco.loadImgs(annotation['image_id'])[0]
        image_path = os.path.join(self._config.data_dir['images'], coco_image['file_name'])
        image = utils.load_image(image_path)

        ann_mask = self._coco.annToMask(annotation)

        mask_categorical = np.full((ann_mask.shape[0], ann_mask.shape[1], self.num_classes()), low_val, dtype=np.float32)
        mask_categorical[:, :, 0] = high_val  # every pixel begins as background

        class_index = self._cid_to_id[annotation['category_id']]
        mask_categorical[ann_mask > 0, class_index] = high_val
        mask_categorical[ann_mask > 0, 0] = low_val  # remove background label from pixels of this (non-bg) category
        return image, mask_categorical
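
The same label-smoothing pattern in isolation, on a toy instance mask (a standalone sketch; eps = 0.05 and three classes are assumed values):

import numpy as np

eps, num_classes = 0.05, 3
ann_mask = np.array([[0, 1], [1, 0]])                # toy binary instance mask
m = np.full(ann_mask.shape + (num_classes,), eps, dtype=np.float32)
m[:, :, 0] = 1 - eps                                 # every pixel starts as background
m[ann_mask > 0, 2] = 1 - eps                         # assign class 2 where the mask is set
m[ann_mask > 0, 0] = eps                             # and remove the background label there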
Project: radar    Author: amoose136    | project source | file source
def round(self, decimals=0, out=None):
        """
        Return an array rounded a to the given number of decimals.

        Refer to `numpy.around` for full documentation.

        See Also
        --------
        numpy.around : equivalent function

        """
        result = self._data.round(decimals=decimals, out=out).view(type(self))
        if result.ndim > 0:
            result._mask = self._mask
            result._update_from(self)
        elif self._mask:
            # Return masked when the scalar is masked
            result = masked
        # No explicit output: we're done
        if out is None:
            return result
        if isinstance(out, MaskedArray):
            out.__setmask__(self._mask)
        return out
Project: radar    Author: amoose136    | project source | file source
def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.

    Refer to `MaskedArray.reshape` for full documentation.

    See Also
    --------
    MaskedArray.reshape : equivalent function

    """
    # We can't use 'frommethod'; it whines about some parameters. Dammit.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        _tmp = narray(a, copy=False).reshape(new_shape, order=order)
        return _tmp.view(MaskedArray)
Project: radar    Author: amoose136    | project source | file source
def dump(a, F):
    """
    Pickle a masked array to a file.

    This is a wrapper around ``pickle.dump``.

    Parameters
    ----------
    a : MaskedArray
        The array to be pickled.
    F : str or file-like object
        The file to pickle `a` to. If a string, the full path to the file.

    """
    if not hasattr(F, 'readline'):
        F = open(F, 'wb')  # pickle needs a binary-mode file
    return pickle.dump(a, F)
Project: seglink    Author: bgshih    | project source | file source
def create_merge_multiple(save_path, creators, shuffle=True):
  n_sample_total = 0
  creator_indices = []
  for i, creator in enumerate(creators):
    creator._read_list()
    n_sample_total += creator.n_samples
    creator_indices.append(np.full((creator.n_samples), i, dtype=int))  # np.int was removed in modern NumPy
  creator_indices = np.concatenate(creator_indices)

  if shuffle:
    np.random.shuffle(creator_indices)

  print('Start creating dataset with {} examples. Output path: {}'.format(
        n_sample_total, save_path))
  writer = tf.python_io.TFRecordWriter(save_path)
  count = 0
  for i in range(n_sample_total):
    creator = creators[creator_indices[i]]
    example = creator._create_next_sample()
    if example is not None:
      writer.write(example.SerializeToString())
      count += 1
    if i > 0 and i % 100 == 0:
      print('Progress %d / %d' % (i, n_sample_total))
  print('Done creating %d samples' % count)
Project: guesswhat    Author: GuessWhatGame    | project source | file source
def list_to_padded_tokens(dialogues, tokenizer):

    # compute the length of the dialogue
    seq_length = [len(d) for d in dialogues]

    # Get dialogue numpy max size
    batch_size = len(dialogues)
    max_seq_length = max(seq_length)

    # Initialize numpy array
    padded_tokens = np.full((batch_size, max_seq_length), tokenizer.padding_token, dtype=np.int32)

    # fill the padded array with word_id
    for i, (one_path, l) in enumerate(zip(dialogues, seq_length)):
       padded_tokens[i, 0:l] = one_path

    return padded_tokens, seq_length
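
A minimal usage sketch of the function above; FakeTokenizer is a hypothetical stand-in for the real tokenizer, assumed only to expose a padding_token attribute:

import numpy as np

class FakeTokenizer:
    padding_token = 0  # hypothetical padding id

padded, lengths = list_to_padded_tokens([[3, 7, 2], [5, 9]], FakeTokenizer())
print(padded)   # [[3 7 2]
                #  [5 9 0]]
print(lengths)  # [3, 2]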
Project: Master-Thesis    Author: AntoinePassemiers    | project source | file source
def __parse_pairs__(self, filepath, delimiter = ',', target_col = 2, column_names = list(), sequence_length = None):
        assert("target" in column_names)
        with open(filepath, "r") as f:
            lines = f.readlines()
            try:
                if sequence_length is None:
                    dataframe = pd.read_csv(filepath, sep = delimiter, skip_blank_lines = True,
                        header = None, names = column_names, index_col = False)
                    sequence_length = np.asarray(dataframe[["i", "j"]]).max()
            except ValueError:
                return None
            data = np.full((sequence_length, sequence_length), np.nan, dtype = np.double)
            np.fill_diagonal(data, Params.DISTANCE_WITH_ITSELF)
            for line in lines:
                elements = line.rstrip("\r\n").split(delimiter)
                i, j, k = int(elements[0]) - 1, int(elements[1]) - 1, float(elements[target_col])
                data[i, j] = data[j, i] = k
            if np.isnan(data).any():
                # sequence_length is wrong or the input file has missing pairs
                warnings.warn("Warning: Pairs of residues are missing from the contacts text file")
                warnings.warn("Number of missing pairs: %i " % np.isnan(data).sum())
            return data
Project: Master-Thesis    Author: AntoinePassemiers    | project source | file source
def extended_2d_fancy_indexing(arr, sl1, sl2, value_of_nan):
    new_shape = tuple([sl1.stop - sl1.start, sl2.stop - sl2.start] + list(arr.shape[2:]))
    result = np.full(new_shape, value_of_nan, dtype = arr.dtype)
    x_lower = 0 if sl1.start < 0 else sl1.start
    x_upper = arr.shape[0] if sl1.stop > arr.shape[0] else sl1.stop
    y_lower = 0 if sl2.start < 0 else sl2.start
    y_upper = arr.shape[1] if sl2.stop > arr.shape[1] else sl2.stop

    new_x_lower = max(0, - sl1.stop + (sl1.stop - sl1.start))
    new_x_upper = new_x_lower + (x_upper - x_lower)
    new_y_lower = max(0, - sl2.stop + (sl2.stop - sl2.start))
    new_y_upper = new_y_lower + (y_upper - y_lower)

    if len(result.shape) == 2:
        result[new_x_lower:new_x_upper, new_y_lower:new_y_upper] = arr[x_lower:x_upper, y_lower:y_upper]
    elif len(result.shape) == 3:
        result[new_x_lower:new_x_upper, new_y_lower:new_y_upper, :] = arr[x_lower:x_upper, y_lower:y_upper, :]
    else:
        raise WrongTensorShapeError()
    return result
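
A usage sketch: the helper behaves like arr[sl1, sl2] except that cells falling outside the array are padded with the given fill value instead of raising (toy inputs, assumed for illustration):

import numpy as np

arr = np.arange(9.0).reshape(3, 3)
# A 3x3 window hanging one row and one column past the top-left corner:
win = extended_2d_fancy_indexing(arr, slice(-1, 2), slice(-1, 2), np.nan)
# win's first row and first column are NaN; win[1:, 1:] equals arr[:2, :2]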
Project: chainerrl    Author: chainer    | project source | file source
def select_action(self, t, greedy_action_func, action_value=None):
        a = greedy_action_func()
        if self.ou_state is None:
            if self.start_with_mu:
                self.ou_state = np.full(a.shape, self.mu, dtype=np.float32)
            else:
                sigma_stable = (self.sigma /
                                np.sqrt(2 * self.theta - self.theta ** 2))
                self.ou_state = np.random.normal(
                    size=a.shape,
                    loc=self.mu, scale=sigma_stable).astype(np.float32)
        else:
            self.evolve()
        noise = self.ou_state
        self.logger.debug('t:%s noise:%s', t, noise)
        return a + noise
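
The sigma_stable term is the stationary standard deviation of the discretized OU recursion x <- x + theta*(mu - x) + sigma*noise: each step gives x' = (1 - theta)*x + sigma*noise, so the stationary variance is sigma**2 / (1 - (1 - theta)**2) = sigma**2 / (2*theta - theta**2). A standalone empirical check of that formula (a sketch; the snippet's evolve() method is assumed to apply this update):

import numpy as np

theta, sigma, mu = 0.15, 0.3, 0.0
x = np.zeros(20000)
for _ in range(1000):
    x += theta * (mu - x) + sigma * np.random.randn(x.size)
print(x.std(), sigma / np.sqrt(2 * theta - theta ** 2))  # approximately equal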
Project: chainerrl    Author: chainer    | project source | file source
def test_soft_copy_param(self):
        a = L.Linear(1, 5)
        b = L.Linear(1, 5)

        a.W.data[:] = 0.5
        b.W.data[:] = 1

        # a = (1 - tau) * a + tau * b
        copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)

        np.testing.assert_almost_equal(a.W.data, np.full(a.W.data.shape, 0.55))
        np.testing.assert_almost_equal(b.W.data, np.full(b.W.data.shape, 1.0))

        copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)

        np.testing.assert_almost_equal(
            a.W.data, np.full(a.W.data.shape, 0.595))
        np.testing.assert_almost_equal(b.W.data, np.full(b.W.data.shape, 1.0))
Project: magenta    Author: tensorflow    | project source | file source
def get_next_note_from_note(self, note):
    """Given a note, uses the model to predict the most probable next note.

    Args:
      note: A one-hot encoding of the note.
    Returns:
      Next note in the same format.
    """
    with self.graph.as_default():
      with tf.variable_scope(self.scope, reuse=True):
        singleton_lengths = np.full(self.batch_size, 1, dtype=int)

        input_batch = np.reshape(note,
                                 (self.batch_size, 1, rl_tuner_ops.NUM_CLASSES))

        softmax, self.state_value = self.session.run(
            [self.softmax, self.state_tensor],
            {self.melody_sequence: input_batch,
             self.initial_state: self.state_value,
             self.lengths: singleton_lengths})

        return self.get_note_from_softmax(softmax)
Project: house-of-enlightenment    Author: house-of-enlightenment    | project source | file source
def set_pixels(self, pixels):

        hsv = np.full((self.X_MAX, self.Y_MAX, 3), 0xFF, dtype=np.uint8)
        hsv[:, :, self.wave_type] = self.pixels[2] / 0xFFFF * 0xFF
        if self.wave_type == self.VALUE:
            hsv[:, :, 1] = 0
        if self.darken_mids:
            hsv[:, :, 2] = np.abs(self.pixels[2] - (0xFFFF >> 1)) / 0xFFFF * 0xFF

        rgb = color_utils.hsv2rgb(hsv)
        pixels[:self.X_MAX, :self.Y_MAX] = rgb

        self.pixels.pop(0)

    ##
    # Calculate next frame of explicit finite difference wave
    #
Project: smrt    Author: smrt-model    | project source | file source
def basic_check(self):
        # TODO Ghi: check the microstructure model is compatible.
        # if we want to be strict, only IndependentShpere should be valid, but in pratice any
        # model of sphere with a radius can make it!
        if not hasattr(self.layer.microstructure, "radius"):
            raise SMRTError("Only microstructure_model which defined a `radius` can be used with Rayleigh scattering")

    # The phase function is inherited from Rayleigh  // Don't remove the commented code
    #    def phase(self, m, mhu):

    # The ke function is inherited from Rayleigh  // Don't remove the commented code
    # def ke(self, mhu):
    #    return np.full(2*len(mhu), self.ks+self.ka)

    # The effective_permittivity is inherited from Rayleigh  // Don't remove the commented code
    # def effective_permittivity(self):
    #    return self._effective_permittivity
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_allreduce_hint(hetr_device, config):
    if hetr_device == 'gpu':
        if 'gpu' not in ngt.transformer_choices():
            pytest.skip("GPUTransformer not available")

    input = config['input']
    device_id = config['device_id']
    axis_A = ng.make_axis(length=4, name='axis_A')
    parallel_axis = ng.make_axis(name='axis_parallel', length=16)

    with ng.metadata(device=hetr_device,
                     device_id=device_id,
                     parallel=parallel_axis):
        var_A = ng.variable(axes=[axis_A], initial_value=UniformInit(1, 1))
        var_B = ng.variable(axes=[axis_A], initial_value=UniformInit(input, input))
        var_B.metadata['reduce_func'] = 'sum'
        var_B_mean = var_B / len(device_id)
        var_minus = (var_A - var_B_mean)

    with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as hetr:
        out_comp = hetr.computation(var_minus)
        result = out_comp()
        np_result = np.full((axis_A.length), config['expected_result'], np.float32)
        np.testing.assert_array_equal(result, np_result)
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_fixed_lr(iter_buf, max_iter, base_lr):
    # set up
    name = 'fixed'
    params = {'name': name,
              'max_iter': max_iter,
              'base_lr': base_lr}

    # execute
    naive_lr = np.full(max_iter, base_lr)
    lr_op = lr_policies[name]['obj'](params)(iter_buf)
    with ExecutorFactory() as ex:
        compute_lr = ex.executor(lr_op, iter_buf)
        ng_lr = [compute_lr(i).item(0) for i in range(max_iter)]

        # compare
        ng.testing.assert_allclose(ng_lr, naive_lr, atol=1e-4, rtol=1e-3)
Project: TemporalEncoding    Author: SpikeFrame    | project source | file source
def plot_spikepattern(spike_trains, sim_time):
    """Plot set of spike trains (spike pattern)"""
    plt.ioff()

    plt.figure()
    for i in range(len(spike_trains)):
        spike_times = spike_trains[i].value
        plt.plot(spike_times, np.full(len(spike_times), i,
                 dtype=int), 'k.')
    plt.xlim((0.0, sim_time))
    plt.ylim((0, len(spike_trains)))
    plt.xlabel('Time (ms)')
    plt.ylabel('Neuron index')
    plt.show()

    plt.ion()
Project: TemporalEncoding    Author: SpikeFrame    | project source | file source
def plot_spiker(record, spike_trains_target, neuron_index=0):
    """Plot spikeraster and target timings for given neuron index"""
    plt.ioff()

    spike_trains = [np.array(i.spiketrains[neuron_index])
                    for i in record.segments]
    n_segments = record.size['segments']

    plt.figure()
    for i in range(len(spike_trains)):
        plt.plot(spike_trains[i], np.full(len(spike_trains[i]), i + 1,
                 dtype=int), 'k.')
    target_timings = spike_trains_target[neuron_index].value
    plt.plot(target_timings, np.full(len(target_timings), 1.025 * n_segments),
             'kx', markersize=8, markeredgewidth=2)
    plt.xlim((0., float(record.segments[0].t_stop)))
    plt.ylim((0, int(1.05 * n_segments)))
    plt.xlabel('Time (ms)')
    plt.ylabel('Trials')
    plt.title('Output neuron {}'.format(neuron_index))
    plt.show()

    plt.ion()
Project: quadpy    Author: nschloe    | project source | file source
def __init__(self, index):
        self.name = 'Walkington(tetrahedron, {})'.format(index)

        if index == 'p5':
            self.degree = 5
            self.weights = 6 * numpy.concatenate([
                numpy.full(4, 0.018781320953002641800),
                numpy.full(4, 0.012248840519393658257),
                numpy.full(6, 0.0070910034628469110730),
                ])
            self.bary = numpy.concatenate([
                _xi1(0.31088591926330060980),
                _xi1(0.092735250310891226402),
                _xi11(0.045503704125649649492),
                ])
            self.points = self.bary[:, 1:]
            return

        # Default: scheme from general simplex
        w = walkington.Walkington(3, index)
        self.weights = w.weights
        self.bary = w.bary
        self.points = w.points
        self.degree = w.degree
        return
Project: quadpy    Author: nschloe    | project source | file source
def _gen5_3(n):
    '''Spherical product Lobatto formula.
    '''
    data = []
    s = sqrt(n+3)
    for k in range(1, n+1):
        rk = sqrt((k+2) * (n+3))
        Bk = fr(2**(k-n) * (n+1), (k+1) * (k+2) * (n+3))
        arr = [rk] + (n-k) * [s]
        data += [
            (Bk, pm_array0(n, arr, range(k-1, n)))
            ]
    B0 = 1 - sum([item[0]*len(item[1]) for item in data])
    data += [
        (B0, numpy.full((1, n), 0))
        ]
    return 5, data
Project: nanopores    Author: mitschabaude    | project source | file source
def setup_rw(params):
    pore = get_pore(**params)
    rw = RandomWalk(pore, **params)
    rw.add_wall_binding(t=params.t_bind, p=params.p_bind, eps=params.eps_bind)

    # define non-standard stopping criteria
    Tmax = params.Tmax
    Rmax = params.Rmax

    def success(self, r, z):
        return self.in_channel(r, z) & (z <= params.zstop)

    def fail(self, r, z):
        if self.t > Tmax:
            return np.full(r.shape, True, dtype=bool)
        toolong = (self.times[self.alive] + self.bind_times[self.alive]) > 5e6
        toofar = r**2 + z**2 > Rmax**2
        return toolong | toofar

    rw.set_stopping_criteria(success, fail)
    return rw

########### STREAMLINE PLOT  ###########
Project: nanopores    Author: mitschabaude    | project source | file source
def move_ellipses(self, coll, cyl=False):
        xz = self.x[:, ::2] if not cyl else np.column_stack(
           [np.sqrt(np.sum(self.x[:, :2]**2, 1)), self.x[:, 2]])
        coll.set_offsets(xz)
        #inside = self.inside_wall()
        #margin = np.nonzero(self.alive)[0][self.inside_wall(2.)]
        colors = np.full((self.N,), "b", dtype=str)
        #colors[margin] = "r"
        colors[self.success] = "k"
        colors[self.fail] = "k"
        colors[self.alive & ~self.can_bind] = "r"
        #colors = [("r" if inside[i] else "g") if margin[i] else "b" for i in range(self.N)]
        coll.set_facecolors(colors)
        #y = self.x[:, 1]
        #d = 50.
        #sizes = self.params.rMolecule*(1. + y/d)
        #coll.set(widths=sizes, heights=sizes)
Project: nanopores    Author: mitschabaude    | project source | file source
def sample_scalar(self, shape, a):
        AMAX = 30
        if a > AMAX:
            return np.random.poisson(a, shape)
        k = 1
        K = np.full(shape, k)
        s = a/np.expm1(a)
        S = s
        U = np.random.random(shape)
        new = S < U
        while np.any(new):
            k += 1
            K[new] = k
            s = s*a/float(k)
            S = S + s
            new = S < U
        return K
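
The loop above is an inverse-CDF sampler for a zero-truncated Poisson(a): s starts at P(K=1 | K>=1) = a/(e**a - 1), is multiplied by a/k at each step, and K stops growing once the running CDF S exceeds the uniform draw U. A standalone sanity check of the target distribution via rejection sampling, using the known mean a/(1 - e**(-a)):

import numpy as np

a = 2.0
pois = np.random.poisson(a, 200000)
ztp = pois[pois > 0]                  # zero-truncated Poisson by rejection
print(ztp.mean(), a / -np.expm1(-a))  # both approximate the ZTP mean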
Project: geoviews    Author: ioam    | project source | file source
def values(cls, dataset, dimension, expanded, flat):
        dimension = dataset.get_dimension(dimension)
        idx = dataset.get_dimension_index(dimension)
        data = dataset.data
        if idx not in [0, 1] and not expanded:
            return data[dimension.name].values
        values = []
        columns = list(data.columns)
        arr = geom_to_array(data.geometry.iloc[0])
        ds = dataset.clone(arr, datatype=cls.subtypes, vdims=[])
        for i, d in enumerate(data.geometry):
            arr = geom_to_array(d)
            if idx in [0, 1]:
                ds.data = arr
                values.append(ds.interface.values(ds, dimension))
            else:
                arr = np.full(len(arr), data.iloc[i, columns.index(dimension.name)])
                values.append(arr)
            values.append([np.nan])  # np.NaN alias was removed in NumPy 2.0
        return np.concatenate(values[:-1]) if values else np.array([])
Project: MOQA    Author: pprakhar30    | project source | file source
def create_validTest_data(self):

        for i in range(len(self.validTestQ)):
            qId         = self.validTestQ[i]
            item        = self.corpus.QAnswers[qId].itemId
            question    = self.corpus.QAnswers[qId].qFeature
            answer_list = [qId, self.validTestNa[i]]

            Pairwise    = self.create_dense_pairwise(item, qId)
            Question    = self.create_sparse_one(qFeature = question)
            Answer      = self.create_sparse_one(answer_list = answer_list) 
            Review      = self.Review[item]
            TermtoTermR     = self.create_sparse_two(item, qFeature = question)
            TermtoTermP     = self.create_sparse_two(item, answer_list = answer_list)

            Question_I  = (Question[0],
                           Question[1] if Question[1].size == 1 and Question[1][0] == 0
                           else np.full((Question[1].size), 1.0 / np.sqrt(Question[1].size)),
                           Question[2])
            Answer_I    = (Answer[0],
                           Answer[1] if Answer[1].size == 1 and Answer[1][0] == 0
                           else np.full((Answer[1].size), 1.0 / np.sqrt(Answer[1].size)),
                           Answer[2])
            Review_I    = (Review[0],
                           np.full((Review[1].size), 1.0 / np.sqrt(Review[1].size)),
                           Review[2])

            self.validTestM.append((Pairwise, Question, Answer, Review, TermtoTermR, TermtoTermP, Question_I, Answer_I, Review_I))
Project: conv_seq2seq    Author: tobyyouup    | project source | file source
def setUp(self):
    super(BridgeTest, self).setUp()
    self.batch_size = 4
    self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
    self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
    final_encoder_state = nest.map_structure(
        lambda x: tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, x),
            dtype=tf.float32),
        self.encoder_cell.state_size)
    self.encoder_outputs = EncoderOutput(
        outputs=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values_length=np.full([self.batch_size], 10),
        final_state=final_encoder_state)
Project: dense_graph_reducer    Author: MarcoFiorucci    | project source | file source
def dominant_sets(graph_mat, max_k=0, tol=1e-5, max_iter=1000):
    graph_cardinality = graph_mat.shape[0]
    if max_k == 0:
        max_k = graph_cardinality
    clusters = np.zeros(graph_cardinality)
    already_clustered = np.full(graph_cardinality, False, dtype=bool)  # np.bool was removed in modern NumPy

    for k in range(max_k):
        if graph_cardinality - already_clustered.sum() <= ceil(0.05 * graph_cardinality):
            break
        # 1000 is added to obtain more similar values when x is normalized
        # x = np.random.random_sample(graph_cardinality) + 1000.0
        x = np.full(graph_cardinality, 1.0)
        x[already_clustered] = 0.0
        x /= x.sum()

        y = replicator(graph_mat, x, np.where(~already_clustered)[0], tol, max_iter)
        cluster = np.where(y >= 1.0 / (graph_cardinality * 1.5))[0]
        already_clustered[cluster] = True
        clusters[cluster] = k
    clusters[~already_clustered] = k
    return clusters
Project: nec_tensorflow    Author: toth-adam    | project source | file source
def _search_ann(self, search_keys, dnd_keys, update_LRU_order):
        batch_indices = []
        for act, ann in self.anns.items():
            # These are the indices we get back from ANN search
            indices = ann.query(search_keys)
            log.debug("ANN indices for action {}: {}".format(act, indices))
            # Create a numpy array filled with the index of the corresponding action vector
            action_indices = np.full(indices.shape, self.action_vector.index(act))
            log.debug("Action indices for action {}: {}".format(act, action_indices))
            # Riffle two arrays
            tf_indices = self._riffle_arrays(action_indices, indices)
            batch_indices.append(tf_indices)
            # Very important part: Modify LRU Order here
            # Doesn't work without tabular update of course!
            if update_LRU_order == 1:
                _ = [self.tf_index__state_hash[act][i] for i in indices.ravel()]
        np_batch = np.asarray(batch_indices)
        log.debug("Batch update indices: {}".format(np_batch))

        # Reshaping to gather_nd compatible format
        final_indices = np.asarray([np_batch[:, j, :, :] for j in range(np_batch.shape[1])], dtype=np.int32)

        return final_indices
Project: xdesign    Author: tomography    | project source | file source
def contains(self, other):
        if isinstance(other, Point):
            x = other._x
        elif isinstance(other, np.ndarray):
            x = other
        elif isinstance(other, Polygon):
            x = _points_to_array(other.vertices)
            return np.all(self.contains(x))
        else:
            raise TypeError("P must be point or ndarray")

        # keep track of whether each point is contained in a face
        bools = np.full(x.shape[0], False, dtype=bool)
        for f in self.faces:
            bools = np.logical_or(bools, f.contains(x))
        return bools
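
The np.full(..., False) call above is the usual all-False accumulator pattern: start with nothing contained, then OR in each face's verdict. The same pattern in isolation (toy stand-ins for the per-face checks):

import numpy as np

pts = np.random.rand(5, 2)
tests = [pts[:, 0] > 0.5, pts[:, 1] > 0.5]  # stand-ins for f.contains(x)
inside = np.full(pts.shape[0], False, dtype=bool)
for t in tests:
    inside = np.logical_or(inside, t)
print(inside)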