Python numpy module: ndarray() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how numpy.ndarray() is used.
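Across these examples, np.ndarray shows up in two roles: as the class used in isinstance()/type() checks, and occasionally as a low-level constructor over an existing buffer. A minimal, self-contained sketch of both roles (illustrative, not taken from any project below):

import numpy as np

# Role 1: type checking -- by far the most common pattern in the examples.
data = np.zeros((4, 3), dtype=np.float32)
assert isinstance(data, np.ndarray)

# Role 2: constructing a view over an existing buffer (np.array / np.zeros
# are usually preferable; the sharedbuffers example below uses this form).
buf = bytearray(4 * 3 * 4)  # room for 4x3 float32 values
view = np.ndarray(shape=(4, 3), dtype=np.float32, buffer=buf)
assert view.shape == (4, 3)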

Project: tensorboard    Author: dmlc    | project source | file source
def make_grid(I, ncols=8):
    assert isinstance(I, np.ndarray), 'plugin error, should pass numpy array here'
    assert I.ndim == 4 and I.shape[1] == 3
    nimg = I.shape[0]
    H = I.shape[2]
    W = I.shape[3]
    ncols = min(nimg, ncols)
    nrows = int(np.ceil(float(nimg) / ncols))
    canvas = np.zeros((3, H * nrows, W * ncols))
    i = 0
    for y in range(nrows):
        for x in range(ncols):
            if i >= nimg:
                break
            canvas[:, y * H:(y + 1) * H, x * W:(x + 1) * W] = I[i]
            i = i + 1
    return canvas
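A quick shape check of make_grid above (illustrative only; assumes the function is in scope):

import numpy as np

batch = np.random.rand(10, 3, 16, 16)      # NCHW
grid = make_grid(batch, ncols=8)
assert grid.shape == (3, 2 * 16, 8 * 16)   # nrows = ceil(10 / 8) = 2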
Project: Millennium-Eye    Author: Elysium1937    | project source | file source
def sizeFiltering(contours):
    """
    this function filters out the smaller retroreflector (as well as any noise) by size
    """

    if len(contours) == 0:
        print("sizeFiltering: Error, no contours found")
        return 0

    big = contours[0]

    for c in contours:
        # both operands must be checked; the original `type(c) and
        # type(big) == numpy.ndarray` only ever tested `big`
        if isinstance(c, numpy.ndarray) and isinstance(big, numpy.ndarray):
            if cv2.contourArea(c) > cv2.contourArea(big):
                big = c
        else:
            print(type(c), type(big))
            return 0
    return big
Project: topically-driven-language-model    Author: jhlau    | project source | file source
def generate(self, sess, conv_hidden, start_word_id, temperature, max_length, stop_word_id):
        state = sess.run(self.cell.zero_state(1, tf.float32))
        x = [[start_word_id]]
        sent = [start_word_id]

        for _ in range(max_length):
            if isinstance(conv_hidden, np.ndarray):
                probs, state = sess.run([self.probs, self.state], \
                    {self.x: x, self.initial_state: state, self.conv_hidden: conv_hidden})
            else:
                probs, state = sess.run([self.probs, self.state], \
                    {self.x: x, self.initial_state: state})
            sent.append(self.sample(probs[0], temperature))
            if sent[-1] == stop_word_id:
                break
            x = [[ sent[-1] ]]

        return sent

    #generate a sequence of words, given a topic
Project: mpiFFT4py    Author: spectralDNS    | project source | file source
def __keytransform__(self, key):
        if isinstance(key[0], np.ndarray):
            shape = key[0].shape
            dtype = key[0].dtype
            i = key[1]
            zero = True if len(key) == 2 else key[2]

        elif isinstance(key[0], tuple):
            if len(key) == 3:
                shape, dtype, i = key
                zero = True

            elif len(key) == 4:
                shape, dtype, i, zero = key

        else:
            raise TypeError("Wrong type of key for work array")

        assert isinstance(zero, bool)
        assert isinstance(i, int)
        self.fillzero = zero
        return (shape, np.dtype(dtype), i)
Project: pyfds    Author: emtpb    | project source | file source
def apply(self, old_values, step):
        """Apply the boundary.

        Args:
            old_values: Old values of the points in the boundary.
            step: Time step of the simulation (required if signals are to be applied).

        Returns:
            New values for the points in the boundary.
        """

        if np.ndim(self.value) == 0 or \
                (np.ndim(self.value) == 1 and type(self.value) == list):
            # if a single value or a list of single values for each index is given
            return self.additive * old_values + self.value
        elif type(self.value) == np.ndarray:
            # if a signal is given
            return self.additive * old_values + self.value[step]
        else:
            # if a list of signals for each index is given
            return [self.additive * old_values[ii] + signal[step]
                    for ii, signal in enumerate(self.value)]
Project: YellowFin_Pytorch    Author: JianGoForIt    | project source | file source
def test_accuracy_full_batch(tokens, features, mini_batch_size, word_attn, sent_attn, th=0.5):
    p = []
    l = []
    cnt = 0
    g = gen_minibatch1(tokens, features, mini_batch_size, False)
    for token, feature in g:
        if cnt % 100 == 0:
            print(cnt)
        cnt += 1
        y_pred = get_predictions(token, feature, word_attn, sent_attn)
        p.append(np.ndarray.flatten(y_pred.data.cpu().numpy()))
    p = [item for sublist in p for item in sublist]
    p = np.array(p)
    return p
Project: treecat    Author: posterior    | project source | file source
def __init__(self, N, V, tree_prior, config):
        """Initialize a model with an empty subsample.

        Args:
            N (int): Number of rows in the dataset.
            V (int): Number of columns (features) in the dataset.
            tree_prior: A [K]-shaped numpy array of prior edge log odds, where
                K is the number of edges in the complete graph on V vertices.
            config: A global config dict.
        """
        assert isinstance(N, int)
        assert isinstance(V, int)
        assert isinstance(tree_prior, np.ndarray)
        assert isinstance(config, dict)
        K = V * (V - 1) // 2  # Number of edges in complete graph.
        assert V <= 32768, 'Invalid # features > 32768: {}'.format(V)
        assert tree_prior.shape == (K, )
        assert tree_prior.dtype == np.float32
        self._config = config.copy()
        self._num_rows = N
        self._tree_prior = tree_prior
        self._tree = TreeStructure(V)
        assert self._tree.num_vertices == V
        self._program = make_propagation_program(self._tree.tree_grid)
        self._added_rows = set()
Project: treecat    Author: posterior    | project source | file source
def __init__(self, data, tree_prior, config):
        """Initialize a model with an empty subsample.

        Args:
            data: An [N, V]-shaped numpy array of real-valued data.
            tree_prior: A [K]-shaped numpy array of prior edge log odds, where
                K is the number of edges in the complete graph on V vertices.
            config: A global config dict.
        """
        assert isinstance(data, np.ndarray)
        data = np.asarray(data, np.float32)
        assert len(data.shape) == 2
        N, V = data.shape
        D = config['model_latent_dim']
        E = V - 1  # Number of edges in the tree.
        TreeTrainer.__init__(self, N, V, tree_prior, config)
        self._data = data
        self._latent = np.zeros([N, V, D], np.float32)

        # This is symmetric positive definite.
        self._vert_ss = np.zeros([V, D, D], np.float32)
        # This is arbitrary (not necessarily symmetric).
        self._edge_ss = np.zeros([E, D, D], np.float32)
        # This represents (count, mean, covariance).
        self._feat_ss = np.zeros([V, D, 1 + 1 + D], np.float32)
Project: core-framework    Author: RedhawkSDR    | project source | file source
def set_type2000_format(format=list):
    """
    Sets the data format returned when reading in type 2000 files.

    The default is 'list', meaning a list of NumPy arrays, where the
    length of each array is equal to the frame size. To return type 2000
    data as a 2-d array, <format> should be 'numpy.ndarray', e.g.:

      import bluefile, numpy
      bluefile.set_type2000_format(numpy.ndarray)

    Note that <format> is expected to be a type object.
    """
    global _type2000_format
    if format not in [ list, numpy.ndarray ]:
        raise TypeError('Only list and numpy.ndarray are supported')
    _type2000_format = format
Project: npstreams    Author: LaurentRDC    | project source | file source
def array_stream(func):
    """ 
    Decorates streaming functions to make sure that the stream
    is a stream of ndarrays. Objects that are not arrays are transformed 
    into arrays. If the stream is in fact a single ndarray, this ndarray 
    is repackaged into a sequence of length 1.

    The first argument of the decorated function is assumed to be an iterable of
    arrays, or an iterable of objects that can be cast to arrays.
    """
    @wraps(func)    # thanks functools
    def decorated(arrays, *args, **kwargs):
        if isinstance(arrays, ndarray):
            arrays = (arrays,)
        return func(map(atleast_1d, arrays), *args, **kwargs)
    return decorated
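A minimal usage sketch for the decorator above, assuming it is in scope together with its module imports (functools.wraps, numpy.ndarray, numpy.atleast_1d); istack is a hypothetical consumer:

import numpy as np

@array_stream
def istack(arrays):
    # `arrays` is guaranteed to be an iterable of ndarrays here
    return np.stack(list(arrays))

single = np.arange(5)
assert istack(single).shape == (1, 5)            # lone array repackaged
assert istack([single, single]).shape == (2, 5)  # iterable passed through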
Project: npstreams    Author: LaurentRDC    | project source | file source
def iload(files, load_func, **kwargs):
    """
    Create a stream of arrays from files, which are loaded lazily.

    Parameters
    ----------
    files : iterable of str or str
        Either an iterable of filenames or a glob-like pattern str.
    load_func : callable
        Function taking a filename as its first argument.
    kwargs
        Keyword arguments are passed to ``load_func``.

    Yields
    ------
    arr: `~numpy.ndarray`
        Loaded data. 
    """
    if isinstance(files, str):
        files = iglob(files)
    files = iter(files)

    yield from map(partial(load_func, **kwargs), files)

# pmap does not support local functions
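A hypothetical call to iload, assuming a directory of .npy files readable with np.load (names illustrative):

import numpy as np

# Files are opened lazily, one per loop iteration.
for arr in iload('frames/*.npy', np.load):
    print(arr.shape)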
Project: pyelastix    Author: almarklein    | project source | file source
def _get_fixed_params(im):
    """ Parameters that the user has no influence on. Mostly chosen
    bases on the input images.
    """

    p = Parameters()

    if not isinstance(im, np.ndarray):
        return p

    # Dimension of the inputs
    p.FixedImageDimension = im.ndim
    p.MovingImageDimension = im.ndim

    # Always write result, so I can verify
    p.WriteResultImage = True

    # How to write the result
    tmp = DTYPE_NP2ITK[im.dtype.name]
    p.ResultImagePixelType = tmp.split('_')[-1].lower()
    p.ResultImageFormat = "mhd"

    # Done
    return p
Project: xpandas    Author: alan-turing-institute    | project source | file source
def __init__(self, skimage_function=None, **function_params):
        '''
        :param skimage_function: transformation function from skimage
        '''
        accepted_types = [
            list, np.ndarray, np.array
        ]

        if skimage_function is None:
            raise Exception('Please specify transform function from scikit-image'
                            ' http://scikit-image.org/docs/dev/api/skimage.transform.html')

        def image_transform_function(img):
            return skimage_function(img, **function_params)

        super(ImageTransformer, self).__init__(data_types=accepted_types,
                                               columns=None,
                                               transform_function=image_transform_function)
Project: xpandas    Author: alan-turing-institute    | project source | file source
def test_image_transformation():
    s = XSeries([generate_image(False) for _ in range(100)])

    try:
        image_transformer = ImageTransformer().fit()
        assert False
    except:
        assert True

    image_transformer = ImageTransformer(skimage_transform.hough_circle, radius=5).fit()
    s_transformed = image_transformer.transform(s)

    assert s_transformed.data_type == np.ndarray

    image_transformer = ImageTransformer(skimage_transform.resize, output_shape=(10, 10)).fit()
    s_transformed = image_transformer.transform(s)

    assert s_transformed.data_type == np.ndarray
Project: cellranger    Author: 10XGenomics    | project source | file source
def compute_nearest_neighbors(submatrix, balltree, k, row_start):
    """ Compute k nearest neighbors on a submatrix
    Args: submatrix (np.ndarray): Data submatrix
          balltree: Nearest neighbor index (from sklearn)
          k: number of nearest neighbors to compute
          row_start: row offset into larger matrix
    Returns a COO sparse adjacency matrix of nearest neighbor relations as (i,j,x)"""

    nn_dist, nn_idx = balltree.query(submatrix, k=k+1)

    # Remove the self-as-neighbors
    nn_idx = nn_idx[:,1:]
    nn_dist = nn_dist[:,1:]

    # Construct a COO sparse matrix of edges and distances
    i = np.repeat(row_start + np.arange(nn_idx.shape[0]), k)
    j = nn_idx.ravel().astype(int)
    return (i, j, nn_dist.ravel())
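A small usage sketch for the function above with scikit-learn's BallTree; assembling the COO matrix with scipy.sparse is an assumption for illustration, not part of the original code:

import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree

X = np.random.rand(100, 8)
i, j, dist = compute_nearest_neighbors(X, BallTree(X), k=5, row_start=0)
adj = sp.coo_matrix((dist, (i, j)), shape=(X.shape[0], X.shape[0]))
print(adj.nnz)  # 100 * 5 stored neighbor relations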
Project: live-plotter    Author: anandtrex    | project source | file source
def plot_loop(self, image, it):
        """
        The actual function that updates the data in the plot initialized in :meth:`~.init`

        :param image: The image that is recorded with :class:`~.PlotRecorder`. It should be a 2-D numpy array
        :param it: The iteration number (independent of the actual x value)
        :return:
        """
        logger.debug("Plotting %s in %s", self.var_name, self.entity_name)

        assert isinstance(image, np.ndarray), "The passed in image should be a numpy array"
        assert len(image.shape) == 2, "The image to be shown should be 2-dimensional"

        if it == 0:
            self.im = self.ax.imshow(image, **self.imshow_kwargs)

        else:
            if it % self.plot_frequency == 0:
                self.im.set_array(image)
Project: tensorboard    Author: dmlc    | project source | file source
def _prepare_image(I):
    assert isinstance(I, np.ndarray), 'plugin error, should pass numpy array here'
    assert I.ndim == 2 or I.ndim == 3 or I.ndim == 4
    if I.ndim == 4:  # NCHW
        if I.shape[1] == 1:  # N1HW
            I = np.concatenate((I, I, I), 1)  # N3HW
        assert I.shape[1] == 3
        I = make_grid(I)  # 3xHxW
    if I.ndim == 3 and I.shape[0] == 1:  # 1xHxW
        I = np.concatenate((I, I, I), 0)  # 3xHxW
    if I.ndim == 2:  # HxW
        I = np.expand_dims(I, 0)  # 1xHxW
        I = np.concatenate((I, I, I), 0)  # 3xHxW
    I = I.transpose(1, 2, 0)

    return I
Project: slim-python    Author: ustunb    | project source | file source
def check_numeric_input(self, input_name, input_value):
        if type(input_value) is np.ndarray:

            if input_value.size == self.P:
                setattr(self, input_name, input_value)
            elif input_value.size == 1:
                setattr(self, input_name, input_value*np.ones(self.P))
            else:
                raise ValueError("length of %s is %d; should be %d" % (input_name, input_value.size, self.P))

        elif type(input_value) is float or type(input_value) is int:
            setattr(self, input_name, float(input_value)*np.ones(self.P))

        elif type(input_value) is list:
            if len(input_value) == self.P:
                setattr(self, input_name, np.array([float(x) for x in input_value]))
            elif len(input_value) == 1:
                setattr(self, input_name, np.array([float(x) for x in input_value]) * np.ones(self.P))
            else:
                raise ValueError("length of %s is %d; should be %d" % (input_name, len(input_value), self.P))

        else:
            raise ValueError("user provided %s with an unsupported type" % (input_name))
Project: brain_segmentation    Author: Ryo-Ito    | project source | file source
def load_nifti(filename, with_affine=False):
    """
    load image from NIFTI file
    Parameters
    ----------
    filename : str
        filename of NIFTI file
    with_affine : bool
        if True, returns affine parameters

    Returns
    -------
    data : np.ndarray
        image data
    """
    img = nib.load(filename)
    data = img.get_data()
    data = np.copy(data, order="C")
    if with_affine:
        return data, img.affine
    return data
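A hypothetical call (the filename is illustrative; any NIfTI file works):

data, affine = load_nifti('subject01.nii.gz', with_affine=True)
print(data.shape, affine.shape)  # affine is the 4x4 voxel-to-world matrix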
Project: tissue_analysis    Author: VirtualPlants    | project source | file source
def update_image_property(self, property_name, property_data, erase_property=False):
        if isinstance(property_data,list) or isinstance(property_data,np.ndarray):
            assert len(property_data) == len(self._labels)
            property_keys = self._labels
        elif isinstance(property_data,dict) or isinstance(property_data,array_dict):
            property_keys = np.sort(list(property_data.keys()))
            property_data = [property_data[l] for l in property_keys]

        if property_name in self._properties.keys():
            if erase_property:
                self._properties[property_name] = array_dict(property_data,keys=property_keys)
            else:
                for l,v in zip(property_keys,property_data):
                    self._properties[property_name][l] = v
        else:
            print "Creating property ",property_name," on image"
            self._properties[property_name] = array_dict(property_data,keys=property_keys)
Project: tissue_analysis    Author: VirtualPlants    | project source | file source
def spatial_image_analysis_property(sia, property_name, labels=None):

    if property_name == 'volume':
        property_data = sia.volume(labels)
    elif property_name == 'neighborhood_size':
        property_data = sia.neighbors_number(labels)
    elif property_name == 'shape_anisotropy':
        inertia_axes_vectors, inertia_axes_values = sia.inertia_axis(labels)
        property_data = [fractional_anisotropy(inertia_axes_values[l]) for l in labels]
    elif property_name == 'gaussian_curvature':
        property_data = sia.gaussian_curvature_CGAL(labels)
    else:
        property_data = dict(zip(labels,labels))

    if isinstance(property_data,np.ndarray) or isinstance(property_data,list):
        property_data = array_dict(property_data, keys=labels)
    elif isinstance(property_data,dict):
        property_data = array_dict(property_data) 

    return property_data
Project: sharedbuffers    Author: jampp    | project source | file source
def __init__(self, buf, offset = 0):
        # Accelerate class attributes
        self._encode = self.encode
        self._dtype = self.dtype
        self._xxh = self.xxh

        # Initialize buffer
        if offset:
            self._buf = self._likebuf = buffer(buf, offset)
        else:
            self._buf = buf
            self._likebuf = _likebuffer(buf)

        # Parse header and map index
        self.index_elements, self.index_offset = self._Header.unpack_from(self._buf, 0)

        self.index = numpy.ndarray(buffer = self._buf, 
            offset = self.index_offset, 
            dtype = self.dtype, 
            shape = (self.index_elements, 3))
Project: CausalGAN    Author: mkocaoglu    | project source | file source
def validate_dict(self,a_dict):
        #Check keys
        for key in self.dict.keys():
            if key not in a_dict.keys():
                raise ValueError('key:',key,'was not in a_dict.keys()')

        for key,val in a_dict.items():
            #Check same keys
            if key not in self.dict.keys():
                raise ValueError('argument key:',key,'was not in self.dict')

            if isinstance(val,np.ndarray):
                my_val=self.dict[key]
                if not np.all(val.shape[1:]==my_val.shape[1:]):
                    raise ValueError('key:',key,'value shape',val.shape,
                                     'does not match existing shape',my_val.shape)
            else: #scalar
                a_val=np.array([[val]])  # [1,1]-shaped array
                my_val=self.dict[key]
                if not np.all(my_val.shape[1:]==a_val.shape[1:]):
                    raise ValueError('key:',key,'value shape',a_val.shape,
                                     'does not match existing shape',my_val.shape)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def roiChanged(self):
        if self.image is None:
            return

        image = self.getProcessedImage()
        if image.ndim == 2:
            axes = (0, 1)
        elif image.ndim == 3:
            axes = (1, 2)
        else:
            return

        data, coords = self.roi.getArrayRegion(image.view(np.ndarray), self.imageItem, axes, returnMappedCoords=True)
        if data is not None:
            while data.ndim > 1:
                data = data.mean(axis=1)
            if image.ndim == 3:
                self.roiCurve.setData(y=data, x=self.tVals)
            else:
                while coords.ndim > 2:
                    coords = coords[:,:,0]
                coords = coords - coords[:,0,np.newaxis]
                xvals = (coords**2).sum(axis=0) ** 0.5
                self.roiCurve.setData(y=data, x=xvals)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def dataType(obj):
    if hasattr(obj, '__len__') and len(obj) == 0:
        return 'empty'
    if isinstance(obj, dict):
        return 'dictOfLists'
    elif isSequence(obj):
        first = obj[0]

        if (hasattr(obj, 'implements') and obj.implements('MetaArray')):
            return 'MetaArray'
        elif isinstance(obj, np.ndarray):
            if obj.ndim == 1:
                if obj.dtype.names is None:
                    return 'listOfValues'
                else:
                    return 'recarray'
            elif obj.ndim == 2 and obj.dtype.names is None and obj.shape[1] == 2:
                return 'Nx2array'
            else:
                raise Exception('array shape must be (N,) or (N,2); got %s instead' % str(obj.shape))
        elif isinstance(first, dict):
            return 'listOfDicts'
        else:
            return 'listOfValues'
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _plotMetaArray(self, arr, x=None, autoLabel=True, **kargs):
        inf = arr.infoCopy()
        if arr.ndim != 1:
            raise Exception('can only automatically plot 1 dimensional arrays.')
        ## create curve
        try:
            xv = arr.xvals(0)
        except:
            if x is None:
                xv = np.arange(arr.shape[0])
            else:
                xv = x
        c = PlotCurveItem(**kargs)
        c.setData(x=xv, y=arr.view(np.ndarray))

        if autoLabel:
            name = arr._info[0].get('name', None)
            units = arr._info[0].get('units', None)
            self.setLabel('bottom', text=name, units=units)

            name = arr._info[1].get('name', None)
            units = arr._info[1].get('units', None)
            self.setLabel('left', text=name, units=units)

        return c
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def setPen(self, *args, **kargs):
        """Set the pen(s) used to draw the outline around each spot.
        If a list or array is provided, then the pen for each spot will be set separately.
        Otherwise, the arguments are passed to pg.mkPen and used as the default pen for
        all spots which do not have a pen explicitly set."""
        update = kargs.pop('update', True)
        dataSet = kargs.pop('dataSet', self.data)

        if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)):
            pens = args[0]
            if 'mask' in kargs and kargs['mask'] is not None:
                pens = pens[kargs['mask']]
            if len(pens) != len(dataSet):
                raise Exception("Number of pens does not match number of points (%d != %d)" % (len(pens), len(dataSet)))
            dataSet['pen'] = pens
        else:
            self.opts['pen'] = fn.mkPen(*args, **kargs)

        dataSet['sourceRect'] = None
        if update:
            self.updateSpots(dataSet)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def setBrush(self, *args, **kargs):
        """Set the brush(es) used to fill the interior of each spot.
        If a list or array is provided, then the brush for each spot will be set separately.
        Otherwise, the arguments are passed to pg.mkBrush and used as the default brush for
        all spots which do not have a brush explicitly set."""
        update = kargs.pop('update', True)
        dataSet = kargs.pop('dataSet', self.data)

        if len(args) == 1 and (isinstance(args[0], np.ndarray) or isinstance(args[0], list)):
            brushes = args[0]
            if 'mask' in kargs and kargs['mask'] is not None:
                brushes = brushes[kargs['mask']]
            if len(brushes) != len(dataSet):
                raise Exception("Number of brushes does not match number of points (%d != %d)" % (len(brushes), len(dataSet)))
            dataSet['brush'] = brushes
        else:
            self.opts['brush'] = fn.mkBrush(*args, **kargs)
            #self._spotPixmap = None

        dataSet['sourceRect'] = None
        if update:
            self.updateSpots(dataSet)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def setSymbol(self, symbol, update=True, dataSet=None, mask=None):
        """Set the symbol(s) used to draw each spot.
        If a list or array is provided, then the symbol for each spot will be set separately.
        Otherwise, the argument will be used as the default symbol for
        all spots which do not have a symbol explicitly set."""
        if dataSet is None:
            dataSet = self.data

        if isinstance(symbol, np.ndarray) or isinstance(symbol, list):
            symbols = symbol
            if mask is not None:
                symbols = symbols[mask]
            if len(symbols) != len(dataSet):
                raise Exception("Number of symbols does not match number of points (%d != %d)" % (len(symbols), len(dataSet)))
            dataSet['symbol'] = symbols
        else:
            self.opts['symbol'] = symbol
            self._spotPixmap = None

        dataSet['sourceRect'] = None
        if update:
            self.updateSpots(dataSet)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def setSize(self, size, update=True, dataSet=None, mask=None):
        """Set the size(s) used to draw each spot.
        If a list or array is provided, then the size for each spot will be set separately.
        Otherwise, the argument will be used as the default size for
        all spots which do not have a size explicitly set."""
        if dataSet is None:
            dataSet = self.data

        if isinstance(size, np.ndarray) or isinstance(size, list):
            sizes = size
            if mask is not None:
                sizes = sizes[mask]
            if len(sizes) != len(dataSet):
                raise Exception("Number of sizes does not match number of points (%d != %d)" % (len(sizes), len(dataSet)))
            dataSet['size'] = sizes
        else:
            self.opts['size'] = size
            self._spotPixmap = None

        dataSet['sourceRect'] = None
        if update:
            self.updateSpots(dataSet)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def iteratorFn(self, data):
        ## Return 1) a function that will provide an iterator for data and 2) a list of header strings
        if isinstance(data, list) or isinstance(data, tuple):
            return lambda d: d.__iter__(), None
        elif isinstance(data, dict):
            return lambda d: iter(d.values()), list(map(asUnicode, data.keys()))
        elif (hasattr(data, 'implements') and data.implements('MetaArray')):
            if data.axisHasColumns(0):
                header = [asUnicode(data.columnName(0, i)) for i in range(data.shape[0])]
            elif data.axisHasValues(0):
                header = list(map(asUnicode, data.xvals(0)))
            else:
                header = None
            return self.iterFirstAxis, header
        elif isinstance(data, np.ndarray):
            return self.iterFirstAxis, None
        elif isinstance(data, np.void):
            return self.iterate, list(map(asUnicode, data.dtype.names))
        elif data is None:
            return (None,None)
        else:
            msg = "Don't know how to iterate over data type: {!s}".format(type(data))
            raise TypeError(msg)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def hoverOver(self, items):
        #print "FlowchartWidget.hoverOver called."
        term = None
        for item in items:
            if item is self.hoverItem:
                return
            self.hoverItem = item
            if hasattr(item, 'term') and isinstance(item.term, Terminal):
                term = item.term
                break
        if term is None:
            self.hoverText.setPlainText("")
        else:
            val = term.value()
            if isinstance(val, ndarray):
                val = "%s %s %s" % (type(val).__name__, str(val.shape), str(val.dtype))
            else:
                val = str(val)
                if len(val) > 400:
                    val = val[:400] + "..."
            self.hoverText.setPlainText("%s.%s = %s" % (term.node().name(), term.name(), val))
            #self.hoverLabel.setCursorPosition(0)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def applyFilter(data, b, a, padding=100, bidir=True):
    """Apply a linear filter with coefficients a, b. Optionally pad the data before filtering
    and/or run the filter in both directions."""
    try:
        import scipy.signal
    except ImportError:
        raise Exception("applyFilter() requires the package scipy.signal.")

    d1 = data.view(np.ndarray)

    if padding > 0:
        d1 = np.hstack([d1[:padding], d1, d1[-padding:]])

    if bidir:
        d1 = scipy.signal.lfilter(b, a, scipy.signal.lfilter(b, a, d1)[::-1])[::-1]
    else:
        d1 = scipy.signal.lfilter(b, a, d1)

    if padding > 0:
        d1 = d1[padding:-padding]

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d1, info=data.infoCopy())
    else:
        return d1
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def besselFilter(data, cutoff, order=1, dt=None, btype='low', bidir=True):
    """return data passed through bessel filter"""
    try:
        import scipy.signal
    except ImportError:
        raise Exception("besselFilter() requires the package scipy.signal.")

    if dt is None:
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1]-tvals[0]) / (len(tvals)-1)
        except:
            dt = 1.0

    b,a = scipy.signal.bessel(order, cutoff * dt, btype=btype) 

    return applyFilter(data, b, a, bidir=bidir)
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def modeFilter(data, window=500, step=None, bins=None):
    """Filter based on histogram-based mode function"""
    d1 = data.view(np.ndarray)
    vals = []
    l2 = int(window/2.)
    if step is None:
        step = l2
    i = 0
    while True:
        if i > len(data)-step:
            break
        vals.append(mode(d1[i:i+window], bins))
        i += step

    chunks = [np.linspace(vals[0], vals[0], l2)]
    for i in range(len(vals)-1):
        chunks.append(np.linspace(vals[i], vals[i+1], step))
    remain = len(data) - step*(len(vals)-1) - l2
    chunks.append(np.linspace(vals[-1], vals[-1], remain))
    d2 = np.hstack(chunks)

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d2, info=data.infoCopy())
    return d2
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _axisSlice(self, i, cols):
        #print "axisSlice", i, cols
        if 'cols' in self._info[i] or 'values' in self._info[i]:
            ax = self._axisCopy(i)
            if 'cols' in ax:
                #print "  slicing columns..", array(ax['cols']), cols
                sl = np.array(ax['cols'])[cols]
                if isinstance(sl, np.ndarray):
                    sl = list(sl)
                ax['cols'] = sl
                #print "  result:", ax['cols']
            if 'values' in ax:
                ax['values'] = np.array(ax['values'])[cols]
        else:
            ax = self._info[i]
        #print "     ", ax
        return ax
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def __getitem__(self, i):
        '''
        Get the item or slice :attr:`i`.
        '''
        obj = super(IrregularlySampledSignal, self).__getitem__(i)
        if isinstance(i, int):  # a single point in time across all channels
            obj = pq.Quantity(obj.magnitude, units=obj.units)
        elif isinstance(i, tuple):
            j, k = i
            if isinstance(j, int):  # a single point in time across some channels
                obj = pq.Quantity(obj.magnitude, units=obj.units)
            else:
                if isinstance(j, slice):
                    obj.times = self.times.__getitem__(j)
                elif isinstance(j, np.ndarray):
                    raise NotImplementedError("Arrays not yet supported")
                else:
                    raise TypeError("%s not supported" % type(j))
                if isinstance(k, int):
                    obj = obj.reshape(-1, 1)
        elif isinstance(i, slice):
            obj.times = self.times.__getitem__(i)
        else:
            raise IndexError("index should be an integer, tuple or slice")
        return obj
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _check_annotations(value):
    """
    Recursively check that value is either of a "simple" type (number, string,
    date/time) or is a (possibly nested) dict, list or numpy array containing
    only simple types.
    """
    if isinstance(value, np.ndarray):
        if not issubclass(value.dtype.type, ALLOWED_ANNOTATION_TYPES):
            raise ValueError("Invalid annotation. NumPy arrays with dtype %s"
                             "are not allowed" % value.dtype.type)
    elif isinstance(value, dict):
        for element in value.values():
            _check_annotations(element)
    elif isinstance(value, (list, tuple)):
        for element in value:
            _check_annotations(element)
    elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):
        raise ValueError("Invalid annotation. Annotations of type %s are not"
                         "allowed" % type(value))
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def _check_time_in_range(value, t_start, t_stop, view=False):
    '''
    Verify that all times in :attr:`value` are between :attr:`t_start`
    and :attr:`t_stop` (inclusive).

    If :attr:`view` is True, views are used for the test.
    Using views drastically increases the speed, but is only safe if you
    are certain that the dtype and units are the same.
    '''

    if not value.size:
        return

    if view:
        value = value.view(np.ndarray)
        t_start = t_start.view(np.ndarray)
        t_stop = t_stop.view(np.ndarray)

    if value.min() < t_start:
        raise ValueError("The first spike (%s) is before t_start (%s)" %
                         (value, t_start))
    if value.max() > t_stop:
        raise ValueError("The last spike (%s) is after t_stop (%s)" %
                         (value, t_stop))
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def assert_arrays_almost_equal(a, b, threshold, dtype=False):
    '''
    Check if two arrays have the same shape and contents that differ
    by abs(a - b) <= threshold for all elements.

    If threshold is None, do an absolute comparison rather than a relative
    comparison.
    '''
    if threshold is None:
        return assert_arrays_equal(a, b, dtype=dtype)

    assert isinstance(a, np.ndarray), "a is a %s" % type(a)
    assert isinstance(b, np.ndarray), "b is a %s" % type(b)
    assert a.shape == b.shape, "%s != %s" % (a, b)
    if a.dtype.kind in ['f', 'c', 'i']:
        assert (abs(a - b) < threshold).all(), \
            "abs(%s - %s)    max(|a - b|) = %s    threshold:%s" % \
            (a, b, (abs(a - b)).max(), threshold)

    if dtype:
        assert a.dtype == b.dtype, \
            "%s and %s not same dtype %s and %s" % (a, b, a.dtype, b.dtype)
Project: AutoML5    Author: djajetic    | project source | file source
def write(filename, predictions):
    ''' Write prediction scores in prescribed format'''
    with open(filename, "w") as output_file:
        for row in predictions:
            if type(row) is not np.ndarray and type(row) is not list:
                row = [row]
            for val in row:
                output_file.write('{:g} '.format(float(val)))
            output_file.write('\n')
Project: lang-reps    Author: chaitanyamalaviya    | project source | file source
def get_concatenated_sets(lang_codes, feature_set_str):
    feature_set_parts = feature_set_str.split("+")
    feature_names = []
    feature_values = np.ndarray((len(lang_codes),0))  # empty (n_langs, 0) base to concatenate onto
    for feature_set_part in feature_set_parts:
        more_feature_names, more_feature_values = get_union_sets(lang_codes, feature_set_part)
        feature_names += more_feature_names
        feature_values = np.concatenate([feature_values, more_feature_values], axis=1)
    return feature_names, feature_values
Project: ngspicepy    Author: ashwith    | project source | file source
def test_get_vector(self):
        net = nt.Netlist(netlists_path + 'dc_ac_check.net')
        net.setup_sim('dc', 'v1 0 1 .3')
        net.run()
        val = net.get_vector('V(1)', 'dc1')
        assert isinstance(val, numpy.ndarray)
        assert len(val) == 4
        ng.reset()
Project: ngspicepy    Author: ashwith    | project source | file source
def test_real(self):
        val = ng.get_data('const.e')
        assert type(val) == np.ndarray
        assert len(val) == 1
        assert val.dtype == 'float64'
        assert val[0] == pytest.approx(np.e)
Project: ngspicepy    Author: ashwith    | project source | file source
def test_cmplx(self):
        val = ng.get_data('const.i')
        assert type(val) == np.ndarray
        assert len(val) == 1
        assert val.dtype == 'complex128'
        assert val[0] == pytest.approx(1j)
Project: GELUs    Author: hendrycks    | project source | file source
def apply(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return np.dot(x.reshape((s[0],np.prod(s[1:]))) - self.mean.get_value(), self.ZCA_mat.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return T.dot(x.flatten(2) - self.mean.dimshuffle('x',0), self.ZCA_mat).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")
Project: GELUs    Author: hendrycks    | project source | file source
def invert(self, x):
        s = x.shape
        if isinstance(x, np.ndarray):
            return (np.dot(x.reshape((s[0],np.prod(s[1:]))), self.inv_ZCA_mat.get_value()) + self.mean.get_value()).reshape(s)
        elif isinstance(x, T.TensorVariable):
            return (T.dot(x.flatten(2), self.inv_ZCA_mat) + self.mean.dimshuffle('x',0)).reshape(s)
        else:
            raise NotImplementedError("Whitening only implemented for numpy arrays or Theano TensorVariables")

# T.nnet.relu has some issues with very large inputs, this is more stable
Project: drl.pth    Author: seba-1511    | project source | file source
def _clip(self, action):
        maxs = self.env.action_space.high
        mins = self.env.action_space.low
        if isinstance(action, np.ndarray):
            np.clip(action, mins, maxs, out=action)
        elif isinstance(action, list):
            for i in range(len(action)):
                action[i] = clip(action[i], mins[i], maxs[i])
        else:
            action = clip(action, mins[0], maxs[0])
        return action
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def get_minibatches(data, minibatch_size, shuffle=True):
    """
    Iterates through the provided data one minibatch at a time. You can use this function to
    iterate through data in minibatches as follows:

        for inputs_minibatch in get_minibatches(inputs, minibatch_size):
            ...

    Or with multiple data sources:

        for inputs_minibatch, labels_minibatch in get_minibatches([inputs, labels], minibatch_size):
            ...

    Args:
        data: there are two possible values:
            - a list or numpy array
            - a list or tuple where each element is either a list or numpy array
        minibatch_size: the maximum number of items in a minibatch
        shuffle: whether to randomize the order of returned data
    Returns:
        minibatches: the return value depends on data:
            - If data is a list/array it yields the next minibatch of data.
            - If data a list of lists/arrays it returns the next minibatch of each element in the
              list. This can be used to iterate through multiple data sources
              (e.g., features and labels) at the same time.

    """
    list_data = isinstance(data, (list, tuple)) and isinstance(data[0], (list,np.ndarray))
    data_size = len(data[0]) if list_data else len(data)
    indices = np.arange(data_size)
    if shuffle:
        np.random.shuffle(indices)
    for minibatch_start in np.arange(0, data_size, minibatch_size):
        minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]
        yield [minibatch(d, minibatch_indices) for d in data] if list_data \
            else minibatch(data, minibatch_indices)
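A quick demonstration of get_minibatches, assuming the minibatch helper defined just below is in scope:

import numpy as np

inputs = np.arange(10).reshape(5, 2)
labels = np.arange(5)
for x_mb, y_mb in get_minibatches([inputs, labels], minibatch_size=2, shuffle=False):
    print(x_mb.shape, y_mb.shape)  # (2, 2) (2,) twice, then (1, 2) (1,)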
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab    | project source | file source
def minibatch(data, minibatch_idx):
    return data[minibatch_idx] if isinstance(data, np.ndarray) else [data[i] for i in minibatch_idx]