Python numpy module: all() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.all().
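
As a quick orientation before the project samples, here is a minimal, self-contained sketch of numpy.all() semantics; the array values are illustrative only.

import numpy as np

a = np.array([[1, 1, 0],
              [1, 1, 1]])

print(np.all(a))          # False: at least one element is falsy
print(np.all(a, axis=0))  # [ True  True False]: reduce down each column
print(np.all(a, axis=1))  # [False  True]: reduce along each row
print(np.all(a >= 0))     # True: comparisons produce boolean arrays to reduce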

Project: onto-lstm    Author: pdasigi    | Project source | File source
def write_predictions(self, inputs):
        '''
        Outputs predictions in a file named <model_name_prefix>.predictions.
        '''
        predictions = numpy.argmax(self.model.predict(inputs), axis=1)
        test_output_file = open("%s.predictions" % self.model_name_prefix, "w")
        for input_indices, prediction in zip(inputs, predictions):
            # The predictions are indices of words in padded sentences. We need to readjust them.
            padding_length = 0
            for index in input_indices:
                if numpy.all(index == 0):
                    padding_length += 1
                else:
                    break
            prediction = prediction - padding_length + 1  # +1 because the indices start at 1.
            print >>test_output_file, prediction
Project: treecat    Author: posterior    | Project source | File source
def guess_feature_type(count, values):
    """Guess the type of a feature, given statistics about the feature.

    Args:
        count: Total number of observations of the feature.
        values: A list of (value, count) pairs for the unique observed values of the feature.

    Returns:
        One of: 'ordinal', 'categorical', or ''
    """
    if len(values) <= 1:
        return ''  # Feature is useless.
    if len(values) <= MAX_CATEGORIES:
        if all(is_small_int(v) for (v, c) in values):
            return ORDINAL
    if len(values) <= min(count / 2, MAX_CATEGORIES):
        return CATEGORICAL
    return ''
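
A hedged usage sketch of the heuristic above: MAX_CATEGORIES, ORDINAL, CATEGORICAL, and is_small_int live elsewhere in the treecat module, so they are stubbed here with assumed values purely to make the example runnable.

# Assumed stand-ins for treecat's module-level definitions (illustrative only).
MAX_CATEGORIES = 20
ORDINAL = 'ordinal'
CATEGORICAL = 'categorical'

def is_small_int(value):
    # Assumption: a small non-negative integer counts as ordinal-friendly.
    return isinstance(value, int) and 0 <= value < MAX_CATEGORIES

# values is a list of (value, count) pairs, matching the loop above.
print(guess_feature_type(100, [(0, 60), (1, 30), (2, 10)]))  # 'ordinal'
print(guess_feature_type(100, [('a', 50), ('b', 50)]))       # 'categorical'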
Project: treecat    Author: posterior    | Project source | File source
def test_quantize_from_probs2(size, resolution):
    set_random_seed(make_seed(size, resolution))
    probs = np.exp(np.random.random(size)).astype(np.float32)
    probs2 = probs.reshape((1, size))
    quantized = quantize_from_probs2(probs2, resolution)
    assert quantized.shape == probs2.shape
    assert quantized.dtype == np.int8
    assert np.all(quantized.sum(axis=1) == resolution)

    # Check that quantized result is closer to target than any other value.
    quantized = quantized.reshape((size, ))
    target = resolution * probs / probs.sum()
    distance = np.abs(quantized - target).sum()
    for combo in itertools.combinations(range(size), resolution):
        other = np.zeros(size, np.int8)
        for i in combo:
            other[i] += 1
        assert other.sum() == resolution
        other_distance = np.abs(other - target).sum()
        assert distance <= other_distance
Project: treecat    Author: posterior    | Project source | File source
def test_server_median(N, V, C, M):
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)

    # Evaluate on random data.
    counts = np.random.randint(10, size=[V], dtype=np.int8)
    table = generate_dataset(N, V, C)['table']
    median = server.median(counts, table.data)
    assert median.shape == table.data.shape
    assert median.dtype == np.int8
    for v in range(V):
        beg, end = table.ragged_index[v:v + 2]
        totals = median[:, beg:end].sum(axis=1)
        assert np.all(totals == counts[v])
Project: treecat    Author: posterior    | Project source | File source
def test_observed_perplexity(N, V, C, M):
    set_random_seed(make_seed(N, V, C, M))
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)

    for count in [1, 2, 3]:
        if count > 1 and C > 2:
            continue  # NotImplementedError.
        counts = count  # use the loop's count rather than a fixed 1
        perplexity = server.observed_perplexity(counts)
        print(perplexity)
        assert perplexity.shape == (V, )
        assert np.all(1 <= perplexity)
        assert np.all(perplexity <= count * C)
Project: treecat    Author: posterior    | Project source | File source
def observed_perplexity(self, counts):
        """Compute perplexity = exp(entropy) of observed variables.

        Perplexity is an information theoretic measure of the number of
        clusters or latent classes. Perplexity is a real number in the range
        [1, M], where M is model_num_clusters.

        Args:
            counts: A [V]-shaped array of multinomial counts.

        Returns:
            A [V]-shaped numpy array of perplexity.
        """
        V, E, M, R = self._VEMR
        if counts is not None:
            # broadcast a scalar count up to a [V]-shaped array
            counts = np.ones(V, dtype=np.int8) * counts
        assert counts.shape == (V, )
        assert counts.dtype == np.int8
        assert np.all(counts > 0)
        observed_entropy = np.empty(V, dtype=np.float32)
        for v in range(V):
            beg, end = self._ragged_index[v:v + 2]
            probs = np.dot(self._feat_cond[beg:end, :], self._vert_probs[v, :])
            observed_entropy[v] = multinomial_entropy(probs, counts[v])
        return np.exp(observed_entropy)
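
A sanity check on the perplexity = exp(entropy) claim in the docstring: a uniform distribution over M clusters has entropy log(M) and hence perplexity exactly M, while a point mass has perplexity 1. The helper below is illustrative, not treecat's multinomial_entropy.

import numpy as np

def perplexity(probs):
    # exp(entropy), with 0 * log(0) treated as 0
    probs = np.asarray(probs, dtype=float)
    nz = probs[probs > 0]
    return np.exp(-np.sum(nz * np.log(nz)))

print(perplexity([0.25, 0.25, 0.25, 0.25]))  # ~4.0: uniform over 4 clusters
print(perplexity([1.0, 0.0, 0.0, 0.0]))      # 1.0: point mass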
Project: treecat    Author: posterior    | Project source | File source
def quantize_from_probs2(probs, resolution):
    """Quantize multiple non-normalized probs to given resolution.

    Args:
        probs: An [N, M]-shaped numpy array of non-normalized probabilities.
        resolution: The integer number of quantization units to distribute
            across each row.

    Returns:
        An [N, M]-shaped array of quantized probabilities such that
        np.all(result.sum(axis=1) == resolution).
    """
    assert len(probs.shape) == 2
    N, M = probs.shape
    probs = probs / probs.sum(axis=1, keepdims=True)
    result = np.zeros(probs.shape, np.int8)
    range_N = np.arange(N, dtype=np.int32)
    for _ in range(resolution):
        sample = probs.argmax(axis=1)
        result[range_N, sample] += 1
        probs[range_N, sample] -= 1.0 / resolution
    return result
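
A brief usage sketch of the function above, checking the documented invariant on illustrative inputs:

import numpy as np

probs = np.array([[0.2, 0.5, 0.3],
                  [3.0, 1.0, 0.0]])   # rows need not be normalized
result = quantize_from_probs2(probs, 10)
print(result)                          # int8 counts, roughly proportional per row
assert np.all(result.sum(axis=1) == 10)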
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):

        self._open()

        t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)

        if self.time_axis == 0:
            local_chunk  = self.data[t_start:t_stop, :]
        elif self.time_axis == 1:
            local_chunk  = self.data[:, t_start:t_stop].T
        self._close()

        if nodes is not None:
            if not numpy.all(nodes == numpy.arange(self.nb_channels)):
                local_chunk = numpy.take(local_chunk, nodes, axis=1)

        return self._scale_data_to_float32(local_chunk)
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def read_chunk(self, idx, chunk_size, padding=(0, 0), nodes=None):

        t_start, t_stop = self._get_t_start_t_stop(idx, chunk_size, padding)
        local_shape     = t_stop - t_start

        local_chunk = numpy.zeros((self.nb_channels, local_shape), dtype=self.data_dtype)
        data_slice  = self._get_slice_(t_start, t_stop) 

        self._open()
        count = 0

        for s in data_slice:
            t_slice = len(s)//self.nb_channels
            local_chunk[:, count:count + t_slice] = self.data[s].reshape(self.nb_channels, len(s)//self.nb_channels)
            count += t_slice

        local_chunk = local_chunk.T
        self._close()

        if nodes is not None:
            if not numpy.all(nodes == numpy.arange(self.nb_channels)):
                local_chunk = numpy.take(local_chunk, nodes, axis=1)

        return self._scale_data_to_float32(local_chunk)
Project: pytorch-semseg    Author: meetshah1995    | Project source | File source
def transform(self, img, lbl):
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # imresize rescales images to the 0-255 range, so we
        # divide by 255.0 afterwards
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)

        lbl = self.encode_segmap(lbl)
        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)
        assert(np.all(classes == np.unique(lbl)))

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()
        return img, lbl
Project: npstreams    Author: LaurentRDC    | Project source | File source
def test_against_numpy(self):
        """ Test iall against numpy.all """
        stream = [np.zeros((8, 16, 2)) for _ in range(11)]
        stream[3][3,0,1] = 1    # so that np.all(axis = None) evaluates to False
        stack = np.stack(stream, axis = -1)

        with self.subTest('axis = None'):
            from_numpy = np.all(stack, axis = None)
            from_stream = last(iall(stream, axis = None))
            self.assertEqual(from_numpy, from_stream)

        for axis in range(stack.ndim):
            with self.subTest('axis = {}'.format(axis)):
                from_numpy = np.all(stack, axis = axis)
                from_stream = last(iall(stream, axis = axis))
                self.assertTrue(np.allclose(from_numpy, from_stream))
Project: npstreams    Author: LaurentRDC    | Project source | File source
def test_against_numpy(self):
        """ Test iany against numpy.any """
        stream = [np.zeros((8, 16, 2)) for _ in range(11)]
        stream[3][3,0,1] = 1    # so that np.any(axis = None) evaluates to True
        stack = np.stack(stream, axis = -1)

        with self.subTest('axis = None'):
            from_numpy = np.any(stack, axis = None)
            from_stream = last(iany(stream, axis = None))
            self.assertEqual(from_numpy, from_stream)

        for axis in range(stack.ndim):
            with self.subTest('axis = {}'.format(axis)):
                from_numpy = np.any(stack, axis = axis)
                from_stream = last(iany(stream, axis = axis))
                self.assertTrue(np.allclose(from_numpy, from_stream))
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def create_buffer_panel(self, initial_dt, bar_data):
        """
        Initialize a RollingPanel containing enough minutes to service all our
        frequencies.
        """
        max_bars_needed = max(
            freq.max_bars for freq in self.unique_frequencies
        )
        freq = '1m' if self.data_frequency == 'minute' else '1d'
        spec = HistorySpec(
            max_bars_needed + 1, freq, None, None, self.env,
            self.data_frequency,
        )

        rp = self._create_panel(
            initial_dt, spec,
        )
        self.buffer_spec = spec

        if bar_data is not None:
            frame = self.frame_from_bardata(bar_data, initial_dt)
            rp.add_frame(initial_dt, frame)

        return rp
Project: cellranger    Author: 10XGenomics    | Project source | File source
def load(group):
        gene_ids = list(getattr(group, cr_constants.H5_GENE_IDS_ATTR).read())

        if hasattr(group, cr_constants.H5_GENE_NAMES_ATTR):
            gene_names = list(getattr(group, cr_constants.H5_GENE_NAMES_ATTR).read())
        else:
            gene_names = gene_ids

        assert len(gene_ids) == len(gene_names)
        genes = [cr_constants.Gene(id, name, None, None, None) for id, name in itertools.izip(gene_ids, gene_names)]
        bcs = list(getattr(group, cr_constants.H5_BCS_ATTR).read())
        matrix = GeneBCMatrix(genes, bcs)

        shape = getattr(group, cr_constants.H5_MATRIX_SHAPE_ATTR).read()
        data = getattr(group, cr_constants.H5_MATRIX_DATA_ATTR).read()
        indices = getattr(group, cr_constants.H5_MATRIX_INDICES_ATTR).read()
        indptr = getattr(group, cr_constants.H5_MATRIX_INDPTR_ATTR).read()

        # quick check to make sure indptr increases monotonically (to catch overflow bugs)
        assert np.all(np.diff(indptr)>=0)

        matrix.m = sp_sparse.csc_matrix((data, indices, indptr), shape=shape)

        return matrix
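
The indptr assertion above is a cheap CSC sanity check: indptr stores cumulative per-column offsets, so its differences must be non-negative. A standalone illustration:

import numpy as np
import scipy.sparse as sp_sparse

m = sp_sparse.csc_matrix(np.array([[1, 0, 2],
                                   [0, 0, 3]]))
print(m.indptr)                        # [0 1 1 3]: cumulative, never decreasing
assert np.all(np.diff(m.indptr) >= 0)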
Project: cellranger    Author: 10XGenomics    | Project source | File source
def save_barcode_summary_h5(self, filename):
        """ Generate a minimal barcode summary h5 without going through the reporter.
        NOTE: only use this if all genomes have the same set of barcodes, i.e. a raw matrix.
        """
        bc_sequences = None
        bc_table_cols = {}
        for (genome, matrix) in self.matrices.iteritems():
            if bc_sequences is None:
                bc_sequences = np.array(matrix.bcs)
                bc_table_cols[cr_constants.H5_BC_SEQUENCE_COL] = bc_sequences
            conf_mapped_deduped_reads_key = cr_utils.format_barcode_summary_h5_key(genome,
                cr_constants.TRANSCRIPTOME_REGION, cr_constants.CONF_MAPPED_DEDUPED_READ_TYPE)
            conf_mapped_deduped_reads = matrix.get_reads_per_bc()
            if len(bc_sequences) != len(conf_mapped_deduped_reads):
                raise ValueError('Cannot write barcode summary since different genomes have different number of barcodes!')
            bc_table_cols[conf_mapped_deduped_reads_key] = conf_mapped_deduped_reads
        cr_utils.write_h5(filename, bc_table_cols)
Project: cloud-volume    Author: seung-lab    | Project source | File source
def test_aligned_read():
    delete_layer()
    cv, data = create_layer(size=(50,50,50,1), offset=(0,0,0))
    # the last dimension is the number of channels
    assert cv[0:50,0:50,0:50].shape == (50,50,50,1)
    assert np.all(cv[0:50,0:50,0:50] == data)

    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(0,0,0))
    # the last dimension is the number of channels
    assert cv[0:64,0:64,0:64].shape == (64,64,64,1) 
    assert np.all(cv[0:64,0:64,0:64] ==  data[:64,:64,:64,:])

    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(10,20,0))
    cutout = cv[10:74,20:84,0:64]
    # the last dimension is the number of channels
    assert cutout.shape == (64,64,64,1) 
    assert np.all(cutout == data[:64,:64,:64,:])
    # get the second chunk
    cutout2 = cv[74:138,20:84,0:64]
    assert cutout2.shape == (64,64,64,1) 
    assert np.all(cutout2 == data[64:128,:64,:64,:])
Project: cloud-volume    Author: seung-lab    | Project source | File source
def test_non_aligned_read():
    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(0,0,0))

    # the last dimension is the number of channels
    assert cv[31:65,0:64,0:64].shape == (34,64,64,1) 
    assert np.all(cv[31:65,0:64,0:64] == data[31:65,:64,:64,:])

    # read a single pixel
    delete_layer()
    cv, data = create_layer(size=(64,64,64,1), offset=(0,0,0))
    # the last dimension is the number of channels
    assert cv[22:23,22:23,22:23].shape == (1,1,1,1) 
    assert np.all(cv[22:23,22:23,22:23] == data[22:23,22:23,22:23,:])

    # Test steps (negative steps are not supported)
    img1 = cv[::2, ::2, ::2, :]
    img2 = cv[:, :, :, :][::2, ::2, ::2, :]
    assert np.array_equal(img1, img2)
Project: cloud-volume    Author: seung-lab    | Project source | File source
def test_write():
    delete_layer()
    cv, data = create_layer(size=(50,50,50,1), offset=(0,0,0))

    replacement_data = np.zeros(shape=(50,50,50,1), dtype=np.uint8)
    cv[0:50,0:50,0:50] = replacement_data
    assert np.all(cv[0:50,0:50,0:50] == replacement_data)

    replacement_data = np.random.randint(255, size=(50,50,50,1), dtype=np.uint8)
    cv[0:50,0:50,0:50] = replacement_data
    assert np.all(cv[0:50,0:50,0:50] == replacement_data)

    # out of bounds
    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(10,20,0))
    with pytest.raises(ValueError):
        cv[74:150,20:84,0:64] = np.ones(shape=(64,64,64,1), dtype=np.uint8)

    # non-aligned writes
    delete_layer()
    cv, data = create_layer(size=(128,64,64,1), offset=(10,20,0))
    with pytest.raises(ValueError):
        cv[21:85,0:64,0:64] = np.ones(shape=(64,64,64,1), dtype=np.uint8)
Project: cloud-volume    Author: seung-lab    | Project source | File source
def exists(self, bbox_or_slices):
    """
    Produce a summary of whether all the requested chunks exist.

    bbox_or_slices: accepts either a Bbox or a tuple of slices representing
      the requested volume. 
    Returns: { chunk_file_name: boolean, ... }
    """
    if type(bbox_or_slices) is Bbox:
      requested_bbox = bbox_or_slices
    else:
      (requested_bbox, steps, channel_slice) = self.__interpret_slices(bbox_or_slices)
    realized_bbox = self.__realized_bbox(requested_bbox)
    cloudpaths = self.__chunknames(realized_bbox, self.bounds, self.key, self.underlying)

    with Storage(self.layer_cloudpath, progress=self.progress) as storage:
      existence_report = storage.files_exist(cloudpaths)
    return existence_report
Project: cxflow-tensorflow    Author: Cognexa    | Project source | File source
def test_repeat(self):
        """ Test if `repeat` works the same as np.repeat."""

        with tf.Session().as_default():
            # try different tensor types
            for npdtype, tfdtype in [(np.int32, tf.int32), (np.float32, tf.float32)]:
                for init_value in [np.array([0, 1, 2, 3], dtype=npdtype),
                                   np.array([[0, 1], [2, 3], [4, 5]], dtype=npdtype)]:
                    # and all their axes
                    for axis in range(len(init_value.shape)):
                        for repeats in [1, 2, 3, 11]:
                            tensor = tf.constant(init_value, dtype=tfdtype)

                            repeated_value = repeat(tensor, repeats=repeats, axis=axis).eval()
                            expected_value = np.repeat(init_value, repeats=repeats, axis=axis)

                            self.assertTrue(np.all(repeated_value == expected_value))
Project: pybot    Author: spillai    | Project source | File source
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.

    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.

    Return True on successful import.

    """
    try:
        module = __import__(module_name)
    except ImportError:
        if warn:
            warnings.warn("Failed to import module " + module_name)
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                if attr in globals():
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("No Python implementation of " + attr)
            globals()[attr] = getattr(module, attr)
        return True
Project: pycma    Author: CMA-ES    | Project source | File source
def __call__(self, x, inverse=False):
        """Rotates the input array `x` with a fixed rotation matrix
           (``self.dicMatrices[len(x)]``)
        """
        x = np.array(x, copy=False)
        N = x.shape[0]  # can be an array or matrix, TODO: accept also a list of arrays?
        if N not in self.dicMatrices:  # create new N-basis once and for all
            rstate = np.random.get_state()
            np.random.seed(self.seed) if self.seed else np.random.seed()
            self.state = np.random.get_state()  # only keep last state
            B = np.random.randn(N, N)
            np.random.set_state(rstate)  # keep untouched/good sequence from outside view
            for i in range(N):
                for j in range(0, i):
                    B[i] -= np.dot(B[i], B[j]) * B[j]
                B[i] /= sum(B[i]**2)**0.5
            self.dicMatrices[N] = B
        if inverse:
            return np.dot(self.dicMatrices[N].T, x)  # compute inverse rotation
        else:
            return np.dot(self.dicMatrices[N], x)  # compute rotation
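
The inner loop above is classical Gram-Schmidt orthonormalization. A standalone numpy sketch (not the pycma API) confirming that the construction yields an orthonormal basis, so applying it preserves vector norms:

import numpy as np

np.random.seed(0)
N = 5
B = np.random.randn(N, N)
for i in range(N):
    for j in range(i):
        B[i] -= np.dot(B[i], B[j]) * B[j]   # remove components along earlier rows
    B[i] /= np.sqrt(np.sum(B[i] ** 2))      # normalize

assert np.allclose(np.dot(B, B.T), np.eye(N))  # orthonormal rows
x = np.random.randn(N)
assert np.isclose(np.linalg.norm(np.dot(B, x)), np.linalg.norm(x))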
Project: pycma    Author: CMA-ES    | Project source | File source
def __imul__(self, factor):
        """define ``self *= factor``.

        As a shortcut for::

            self = self.__imul__(factor)

        """
        try:
            if factor == 1:
                return self
        except: pass
        try:
            if (np.size(factor) == np.size(self.scaling) and
                    all(factor == 1)):
                return self
        except: pass
        if self.is_identity and np.size(self.scaling) == 1:
            self.scaling = np.ones(np.size(factor))
        self.is_identity = False
        self.scaling *= factor
        self.dim = np.size(self.scaling)
        return self
Project: demcoreg    Author: dshean    | Project source | File source
def get_bareground_fn(datadir=None):
    """Calls external shell script `get_bareground.sh` to fetch:

    ~2010 global bare ground, 30 m

    Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)

    The shell script will compress all downloaded tiles using lossless LZW compression.

    http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
    """
    if datadir is None:
        datadir = iolib.get_datadir()
    bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
    if not os.path.exists(bg_fn):
        cmd = ['get_bareground.sh',]
        subprocess.call(cmd)
    return bg_fn 

#Download latest global RGI glacier db
Project: psola    Author: jcreinhold    | Project source | File source
def lpf(x, cutoff, fs, order=5):
    """
    low pass filters signal with Butterworth digital
    filter according to cutoff frequency

    filter uses Gustafsson’s method to make sure
    forward-backward filt == backward-forward filt

    Note that edge effects are expected

    Args:
        x      (array): signal data (numpy array)
        cutoff (float): cutoff frequency (Hz)
        fs       (int): sample rate (Hz)
        order    (int): order of filter (default 5)

    Returns:
        filtered (array): low pass filtered data
    """
    nyquist = fs / 2
    b, a = butter(order, cutoff / nyquist)
    if not np.all(np.abs(np.roots(a)) < 1):
        raise PsolaError('Filter with cutoff at {} Hz is unstable given '
                         'sample frequency {} Hz'.format(cutoff, fs))
    filtered = filtfilt(b, a, x, method='gust')
    return filtered
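
A hedged usage sketch of lpf above; it assumes butter and filtfilt come from scipy.signal and PsolaError is defined elsewhere in psola, and the test signal and cutoff are illustrative.

import numpy as np
from scipy.signal import butter, filtfilt

fs = 8000                        # sample rate (Hz)
t = np.arange(fs) / fs           # one second of samples
x = np.sin(2 * np.pi * 50 * t) + np.sin(2 * np.pi * 1000 * t)

y = lpf(x, cutoff=200, fs=fs)    # keeps the 50 Hz tone, attenuates 1 kHz
assert y.shape == x.shape and np.all(np.isfinite(y))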
Project: psola    Author: jcreinhold    | Project source | File source
def test_pitch_estimation(self):
        """
        test pitch estimation algo with contrived small example
        if pitch is within 5 Hz, then say it's good (for this small example,
        since the algorithm wasn't made for this type of synthesized signal)
        """
        cfg = ExperimentConfig(pitch_strength_thresh=-np.inf)
        # the next 3 variables are in Hz
        tolerance = 5
        fs = 48000
        f = 150
        # create a sine wave of f Hz freq sampled at fs Hz
        x = np.sin(2*np.pi * f/fs * np.arange(2**10))
        # estimate the pitch, it should be close to f
        p, t, s = pest.pitch_estimation(x, fs, cfg)
        self.assertTrue(np.all(np.abs(p - f) < tolerance))
Project: CausalGAN    Author: mkocaoglu    | Project source | File source
def validate_dict(self,a_dict):
        #Check keys
        for key,val in self.dict.items():
            if not key in a_dict.keys():
                raise ValueError('key:',key,'was not in a_dict.keys()')

        for key,val in a_dict.items():
            #Check same keys
            if not key in self.dict.keys():
                raise ValueError('argument key:',key,'was not in self.dict')

            if isinstance(val,np.ndarray):
                #print('ndarray')
                my_val=self.dict[key]
                if not np.all(val.shape[1:]==my_val.shape[1:]):
                    raise ValueError('key:',key,'value shape',val.shape,
                                     'does not match existing shape',my_val.shape)
            else: #scalar
                a_val=np.array([[val]])#[1,1]shape array
                my_val=self.dict[key]
                if not np.all(my_val.shape[1:]==a_val.shape[1:]):
                    raise ValueError('key:',key,'value shape',val.shape,
                                     'does not match existing shape',my_val.shape)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def getColors(self, mode=None):
        """Return list of all color stops converted to the specified mode.
        If mode is None, then no conversion is done."""
        if isinstance(mode, basestring):
            mode = self.enumMap[mode.lower()]

        color = self.color
        if mode in [self.BYTE, self.QCOLOR] and color.dtype.kind == 'f':
            color = (color * 255).astype(np.ubyte)
        elif mode == self.FLOAT and color.dtype.kind != 'f':
            color = color.astype(float) / 255.

        if mode == self.QCOLOR:
            color = [QtGui.QColor(*x) for x in color]

        return color
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def register(self, name):
        """
        Add this ViewBox to the registered list of views. 

        This allows users to manually link the axes of any other ViewBox to
        this one. The specified *name* will appear in the drop-down lists for 
        axis linking in the context menus of all other views.

        The same can be accomplished by initializing the ViewBox with the *name* attribute.
        """
        ViewBox.AllViews[self] = None
        if self.name is not None:
            del ViewBox.NamedViews[self.name]
        self.name = name
        if name is not None:
            ViewBox.NamedViews[name] = self
            ViewBox.updateAllViewLists()
            sid = id(self)
            self.destroyed.connect(lambda: ViewBox.forgetView(sid, name) if (ViewBox is not None and 'sid' in locals() and 'name' in locals()) else None)
            #self.destroyed.connect(self.unregister)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def autoRange(self, padding=None, items=None, item=None):
        """
        Set the range of the view box to make all children visible.
        Note that this is not the same as enableAutoRange, which causes the view to 
        automatically auto-range whenever its contents are changed.

        ==============  ============================================================
        **Arguments:**
        padding         The fraction of the total data range to add on to the final
                        visible range. By default, this value is set between 0.02
                        and 0.1 depending on the size of the ViewBox.
        items           If specified, this is a list of items to consider when
                        determining the visible range.
        ==============  ============================================================
        """
        if item is None:
            bounds = self.childrenBoundingRect(items=items)
        else:
            print("Warning: ViewBox.autoRange(item=__) is deprecated. Use 'items' argument instead.")
            bounds = self.mapFromItemToView(item, item.boundingRect()).boundingRect()

        if bounds is not None:
            self.setRange(bounds, padding=padding)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def test_rescaleData():
    dtypes = list(map(np.dtype, ('ubyte', 'uint16', 'byte', 'int16', 'int', 'float')))  # list, so both nested loops can iterate it
    for dtype1 in dtypes:
        for dtype2 in dtypes:
            data = (np.random.random(size=10) * 2**32 - 2**31).astype(dtype1)
            for scale, offset in [(10, 0), (10., 0.), (1, -50), (0.2, 0.5), (0.001, 0)]:
                if dtype2.kind in 'iu':
                    lim = np.iinfo(dtype2)
                    lim = lim.min, lim.max
                else:
                    lim = (-np.inf, np.inf)
                s1 = np.clip(float(scale) * (data-float(offset)), *lim).astype(dtype2)
                s2 = pg.rescaleData(data, scale, offset, dtype2)
                assert s1.dtype == s2.dtype
                if dtype2.kind in 'iu':
                    assert np.all(s1 == s2)
                else:
                    assert np.allclose(s1, s2)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def assert_arrays_almost_equal(a, b, threshold, dtype=False):
    '''
    Check if two arrays have the same shape and contents that differ
    by abs(a - b) <= threshold for all elements.

    If threshold is None, fall back to an exact comparison
    (assert_arrays_equal) rather than a thresholded one.
    '''
    if threshold is None:
        return assert_arrays_equal(a, b, dtype=dtype)

    assert isinstance(a, np.ndarray), "a is a %s" % type(a)
    assert isinstance(b, np.ndarray), "b is a %s" % type(b)
    assert a.shape == b.shape, "%s != %s" % (a, b)
    #assert a.dtype == b.dtype, "%s and %b not same dtype %s %s" % (a, b,
    #                                                               a.dtype,
    #                                                               b.dtype)
    if a.dtype.kind in ['f', 'c', 'i']:
        assert (abs(a - b) < threshold).all(), \
            "abs(%s - %s)    max(|a - b|) = %s    threshold:%s" % \
            (a, b, (abs(a - b)).max(), threshold)

    if dtype:
        assert a.dtype == b.dtype, \
            "%s and %s not same dtype %s and %s" % (a, b, a.dtype, b.dtype)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def testMatrix():
    """
    SRTTransform3D => Transform3D => SRTTransform3D
    """
    tr = pg.SRTTransform3D()
    tr.setRotate(45, (0, 0, 1))
    tr.setScale(0.2, 0.4, 1)
    tr.setTranslate(10, 20, 40)
    assert tr.getRotation() == (45, QtGui.QVector3D(0, 0, 1))
    assert tr.getScale() == QtGui.QVector3D(0.2, 0.4, 1)
    assert tr.getTranslation() == QtGui.QVector3D(10, 20, 40)

    tr2 = pg.Transform3D(tr)
    assert np.all(tr.matrix() == tr2.matrix())

    # This is the most important test:
    # The transition from Transform3D to SRTTransform3D is a tricky one.
    tr3 = pg.SRTTransform3D(tr2)
    assert_array_almost_equal(tr.matrix(), tr3.matrix())
    assert_almost_equal(tr3.getRotation()[0], tr.getRotation()[0])
    assert_array_almost_equal(tr3.getRotation()[1], tr.getRotation()[1])
    assert_array_almost_equal(tr3.getScale(), tr.getScale())
    assert_array_almost_equal(tr3.getTranslation(), tr.getTranslation())
Project: Projects    Author: it2school    | Project source | File source
def _test_array_argument(self, format, a, test_pass):
        from numpy import array, all as all_

        try:
            snd = mixer.Sound(array=a)
        except ValueError:
            if not test_pass:
                return
            self.fail("Raised ValueError: Format %i, dtype %s" %
                      (format, a.dtype))
        if not test_pass:
            self.fail("Did not raise ValueError: Format %i, dtype %s" %
                      (format, a.dtype))
        a2 = array(snd)
        a3 = a.astype(a2.dtype)
        lshift = abs(format) - 8 * a.itemsize
        if lshift >= 0:
            # This is asymmetric with respect to downcasting.
            a3 <<= lshift
        self.assert_(all_(a2 == a3),
                     "Format %i, dtype %s" % (format, a.dtype))
Project: yoctol-keras-layer-zoo    Author: Yoctol    | Project source | File source
def test_image_data_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertTrue(
            np.all(
                mask[:, self.x_start:self.x_end]
            )
        )
        self.assertFalse(
            np.any(
                mask[:, :self.x_start]
            )
        )
        self.assertFalse(
            np.any(
                mask[:, self.x_end:]
            )
        )
Project: risk-slim    Author: ustunb    | Project source | File source
def get_calibration_metrics(model, data):
    scores = (data['X'] * data['Y']).dot(model)

    #distinct scores

    #compute calibration error at each score

    full_metrics = {
        'scores': float('nan'),
        'count': float('nan'),
        'predicted_risk': float('nan'),
        'empirical_risk': float('nan')
    }

    # WIP: per-score calibration error; the original expression here was
    # incomplete. One plausible form, as in the sketch below:
    # cal_error = np.sqrt(np.sum(counts * (predicted_risk - empirical_risk) ** 2))

    summary_metrics = {
        'mean_calibration_error': float('nan')
    }

    #counts
    #metrics
    #mean calibration error across all scores

    pass
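
The body above is an unfinished stub (it ends in pass). One plausible, hedged completion, not the risk-slim implementation: group examples by distinct score, take predicted risk as the logistic of the score and empirical risk as the observed positive rate, then report a count-weighted RMSE.

import numpy as np

def get_calibration_metrics_sketch(model, data):
    scores = data['X'].dot(model)                 # real-valued risk scores
    y = (data['Y'].ravel() == 1)                  # labels as booleans

    distinct = np.unique(scores)
    counts = np.array([np.sum(scores == s) for s in distinct])
    predicted = 1.0 / (1.0 + np.exp(-distinct))   # assumed logistic link
    empirical = np.array([y[scores == s].mean() for s in distinct])

    weights = counts / counts.sum()               # count-weighted RMSE
    mean_cal_error = np.sqrt(np.sum(weights * (predicted - empirical) ** 2))
    return {'scores': distinct, 'count': counts,
            'predicted_risk': predicted, 'empirical_risk': empirical,
            'mean_calibration_error': mean_cal_error}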
Project: autolab_core    Author: BerkeleyAutomation    | Project source | File source
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
    """Try import all public attributes from module into global namespace.

    Existing attributes with name clashes are renamed with prefix.
    Attributes starting with underscore are ignored by default.

    Return True on successful import.

    """
    try:
        module = __import__(module_name)
    except ImportError:
        if warn:
            warnings.warn("Failed to import module " + module_name)
    else:
        for attr in dir(module):
            if ignore and attr.startswith(ignore):
                continue
            if prefix:
                if attr in globals():
                    globals()[prefix + attr] = globals()[attr]
                elif warn:
                    warnings.warn("No Python implementation of " + attr)
            globals()[attr] = getattr(module, attr)
        return True
Project: lang-reps    Author: chaitanyamalaviya    | Project source | File source
def get(languages, feature_set_str, header=False, random=False, minimal=False):

    lang_codes = languages.split()
    feature_names, feature_values = get_concatenated_sets(lang_codes, feature_set_str)
    feature_names = np.array([ f.replace(" ","_") for f in feature_names ])
    feats = {}

    if minimal:
        mask = np.all(feature_values == 0.0, axis=0)
        mask |= np.all(feature_values == 1.0, axis=0)
        mask |= np.all(feature_values == -1.0, axis=0)
        unmasked_indices = np.where(np.logical_not(mask))
    else:
        unmasked_indices = np.where(np.ones(feature_values.shape[1]))

    if random:
        feature_values = np.random.random(feature_values.shape) >= 0.5

    if header:
        print("\t".join(['CODE']+list(feature_names[unmasked_indices])))
    feat_names = feature_names[unmasked_indices]

    for i, lang_code in enumerate(lang_codes):
        values = feature_values[i,unmasked_indices].ravel()
        #values = [ '--' if f == -1 else ("%0.4f"%f).rstrip("0").rstrip(".") for f in values ]
        feats[lang_code] = values
        #print("\t".join([lang_code]+values))
    return feats, feat_names

#if __name__ == '__main__':
#    argparser = argparse.ArgumentParser()
#    argparser.add_argument("languages", default='', help="The languages of interest, in ISO 639-3 codes, separated by spaces (e.g., \"deu eng fra swe\")")
#    argparser.add_argument("feature_set", default='', help="The feature set or sets of interest (e.g., \"syntax_knn\" or \"fam\"), joined by concatenation (+) or element-wise union (|).")
#    argparser.add_argument("-f", "--fields", default=False, action="store_true", help="Print feature names as the first row of data.")
#    argparser.add_argument("-r", "--random", default=False, action="store_true", help="Randomize all feature values (e.g., to make a control group).")
#    argparser.add_argument("-m", "--minimal", default=False, action="store_true", help="Suppress columns that are all 0, all 1, or all nulls.")
#    args = argparser.parse_args()
#    get(args.languages, args.feature_set, args.fields, args.random, args.minimal)
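
The minimal branch above uses np.all per column to drop features that are constant (all 0, all 1, or all -1) across languages. A standalone illustration of that masking pattern with made-up values:

import numpy as np

feature_values = np.array([[0., 1., 1., -1.],
                           [0., 0., 1., -1.],
                           [0., 1., 1., -1.]])
mask = np.all(feature_values == 0.0, axis=0)
mask |= np.all(feature_values == 1.0, axis=0)
mask |= np.all(feature_values == -1.0, axis=0)
print(np.where(np.logical_not(mask))[0])  # [1]: only column 1 is informative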
Project: pyballd    Author: Yurlungur    | Project source | File source
def get_integration_weights(order,nodes=None):
    """
    Returns the integration weights for Gauss-Lobatto quadrature
    as a function of the order of the polynomial we want to
    represent.
    See: https://en.wikipedia.org/wiki/Gaussian_quadrature
    See: arXiv:gr-qc/0609020v1
    """
    if nodes is None:  # no nodes supplied; compute the quadrature points
        nodes=get_quadrature_points(order)
    if poly == polynomial.chebyshev.Chebyshev:
        weights = np.empty((order+1))
        weights[1:-1] = np.pi/order
        weights[0] = np.pi/(2*order)
        weights[-1] = weights[0]
        return weights
    elif poly == polynomial.legendre.Legendre:
        interior_weights = 2/((order+1)*order*poly.basis(order)(nodes[1:-1])**2)
        boundary_weights = np.array([1-0.5*np.sum(interior_weights)])
        weights = np.concatenate((boundary_weights,
                                  interior_weights,
                                  boundary_weights))
        return weights
    else:
        raise ValueError("Not a known polynomial type.")
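
A quick standalone check of the Chebyshev branch above: Gauss-Lobatto weights for the Chebyshev weight function 1/sqrt(1 - x^2) must sum to its integral over [-1, 1], which is pi.

import numpy as np

order = 8
weights = np.empty(order + 1)
weights[1:-1] = np.pi / order
weights[0] = weights[-1] = np.pi / (2 * order)
assert np.isclose(weights.sum(), np.pi)  # (order-1)*pi/order + 2*pi/(2*order) = pi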
Project: onto-lstm    Author: pdasigi    | Project source | File source
def define_attention_model(self):
        '''
        Take necessary parts out of the model to get OntoLSTM attention.
        '''
        if not self.model:
            raise RuntimeError("Model not trained yet!")
        input_shape = self.model.get_input_shape_at(0)
        input_layer = Input(input_shape[1:], dtype='int32')  # removing batch size
        embedding_layer = None
        encoder_layer = None
        for layer in self.model.layers:
            if layer.name == "embedding":
                embedding_layer = layer
            elif layer.name == "onto_lstm":
                # We need to redefine the OntoLSTM layer with the learned weights and set return attention to True.
                # Assuming we'll want attention values for all words (return_sequences = True)
                if isinstance(layer, Bidirectional):
                    onto_lstm = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
                                                  num_senses=self.num_senses, num_hyps=self.num_hyps,
                                                  use_attention=True, return_attention=True, return_sequences=True,
                                                  consume_less='gpu')
                    encoder_layer = Bidirectional(onto_lstm, weights=layer.get_weights())
                else:
                    encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim,
                                                      output_dim=self.embed_dim, num_senses=self.num_senses,
                                                      num_hyps=self.num_hyps, use_attention=True,
                                                      return_attention=True, return_sequences=True,
                                                      consume_less='gpu', weights=layer.get_weights())
                break
        if not embedding_layer or not encoder_layer:
            raise RuntimeError("Required layers not found!")
        attention_output = encoder_layer(embedding_layer(input_layer))
        self.attention_model = Model(inputs=input_layer, outputs=attention_output)
        print >>sys.stderr, "Attention model summary:"
        self.attention_model.summary()
        self.attention_model.compile(loss="mse", optimizer="sgd")  # Loss and optimizer do not matter!
Project: mpiFFT4py    Author: spectralDNS    | Project source | File source
def test_FFT(FFT):
    N = FFT.N
    if FFT.rank == 0:
        A = random(N).astype(FFT.float)
        if FFT.communication == 'AlltoallN':
            C = empty(FFT.global_complex_shape(), dtype=FFT.complex)
            C = rfftn(A, C, axes=(0,1,2))
            C[:, :, -1] = 0  # Remove Nyquist frequency
            A = irfftn(C, A, axes=(0,1,2))
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
        B2 = rfftn(A, B2, axes=(0,1,2))

    else:
        A = zeros(N, dtype=FFT.float)
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)

    atol, rtol = (1e-10, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    FFT.comm.Bcast(A, root=0)
    FFT.comm.Bcast(B2, root=0)

    a = zeros(FFT.real_shape(), dtype=FFT.float)
    c = zeros(FFT.complex_shape(), dtype=FFT.complex)
    a[:] = A[FFT.real_local_slice()]
    c = FFT.fftn(a, c)
    #print abs((c - B2[FFT.complex_local_slice()])/c.max()).max()
    assert all(abs((c - B2[FFT.complex_local_slice()])/c.max()) < rtol)
    #assert allclose(c, B2[FFT.complex_local_slice()], rtol, atol)
    a = FFT.ifftn(c, a)
    #print abs((a - A[FFT.real_local_slice()])/a.max()).max()

    assert all(abs((a - A[FFT.real_local_slice()])/a.max()) < rtol)
    #assert allclose(a, A[FFT.real_local_slice()], rtol, atol)
Project: pyfds    Author: emtpb    | Project source | File source
def is_stable(self):
        """Checks if simulation satisfies stability conditions. Does not account for instability
        due to high absorption or nonlinear effects. Includes a little headroom (1%).

        Returns:
            True if stable, False if not.
        """

        return np.all(self.material_vector('sound_velocity') <
                      0.99 * self.x.increment / self.t.increment)
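
The check above is a CFL-type stability condition, c * dt <= dx with a 1% margin. A worked numeric check with illustrative grid values:

import numpy as np

c = 343.0    # sound velocity, m/s (illustrative)
dx = 1e-3    # spatial step, m
dt = 2e-6    # time step, s
assert np.all(c < 0.99 * dx / dt)   # 343 < 0.99 * 500, so the grid is stable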
Project: pyfds    Author: emtpb    | Project source | File source
def is_stable(self):
        """Checks if simulation satisfies stability conditions. Does not account for instability
        due to high absorption and includes a little headroom (1%).

        Returns:
            True if stable, False if not.
        """

        return np.all(self.material_vector('sound_velocity') <
                      0.99 * self.x.increment / self.t.increment)
Project: pyfds    Author: emtpb    | Project source | File source
def is_stable(self):
        """Checks if simulation satisfies stability conditions. Does not account for instability
        due to high absorption and includes a little headroom (1%).

        Returns:
            True if stable, False if not.
        """

        return np.all(self.material_vector('sound_velocity') <
                      0.99 * min(self.x.increment, self.y.increment) / self.t.increment)
Project: pyfds    Author: emtpb    | Project source | File source
def absorption_coef(self):
        """Returns a helper variable (called mu in publications by L. Claes) that sums up all
        losses into a single quantity."""

        if not self._absorption_coef:
            return (4/3 * self.shear_viscosity + self.bulk_viscosity + self.thermal_conductivity *
                    (self.isobaric_heat_cap - self.isochoric_heat_cap) /
                    (self.isobaric_heat_cap * self.isochoric_heat_cap))
        else:
            return self._absorption_coef
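
The returned quantity combines viscous and thermal losses; schematically, mu = (4/3) * eta_s + eta_b + kappa * (c_p - c_v) / (c_p * c_v). A sketch of the same expression with purely illustrative SI values, not calibrated material data:

shear_viscosity = 1.8e-5       # eta_s, Pa*s (illustrative)
bulk_viscosity = 1.0e-5        # eta_b, Pa*s (illustrative)
thermal_conductivity = 0.026   # kappa, W/(m*K) (illustrative)
isobaric_heat_cap = 1005.0     # c_p, J/(kg*K) (illustrative)
isochoric_heat_cap = 718.0     # c_v, J/(kg*K) (illustrative)

mu = (4/3 * shear_viscosity + bulk_viscosity + thermal_conductivity *
      (isobaric_heat_cap - isochoric_heat_cap) /
      (isobaric_heat_cap * isochoric_heat_cap))
print(mu)   # ~4.4e-5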