Python numpy module, ndarrays() example source code

We have extracted the following 21 code examples from open-source Python projects to illustrate how numpy.ndarray objects (and lists of them) are used in practice. Note that numpy has no ndarrays() function; the name is simply how this index groups code that works with multiple np.ndarrays.
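
Before the individual project examples, a minimal sketch (hypothetical helper and data, not taken from any of the projects below) of the pattern most of them share: accept either a single np.ndarray or a list of np.ndarrays, and normalize to a list before iterating:

import numpy as np

def as_ndarray_list(data):
    # wrap a single ndarray so downstream code can always iterate over a list
    if isinstance(data, np.ndarray):
        return [data]
    return [np.asarray(d) for d in data]

arrays = as_ndarray_list(np.arange(5))               # -> [array([0, 1, 2, 3, 4])]
arrays = as_ndarray_list([[1.0, 2.0], [3.0, 4.0]])   # -> a list of two np.ndarrays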

Project: MulensModel    Author: rpoleski
def __init__(self, data=None, magnification=None, n_sources=None):
        # Initialize self._datasets, self._magnification, and self._n_sources
        if isinstance(data, list):
            self._datasets = data
        else:
            self._datasets = [data]

        if isinstance(magnification, list):
            self._magnification = magnification
        else:
            self._magnification = [magnification]

        if magnification is None and n_sources is None:
            raise ValueError(
                'Fit class requires magnification vectors' +
                ' or the number of sources to be specified directly')
        self._n_sources = n_sources

        # Set up numpy ndarrays for flux parameters
        self._flux_blending = dict()
        self._flux_sources = dict()
Project: coordinates    Author: markovmodel
def estimate(self, X, **kwargs):
        if not isinstance(X, Iterable):
            if isinstance(X, np.ndarray) or \
                    (isinstance(X, (list, tuple)) and len(X) > 0 and all([isinstance(x, np.ndarray) for x in X])):
                X = DataInMemory(X, self.chunksize)
                self.data_producer = X
            else:
                raise ValueError("no np.ndarray or non-empty list of np.ndarrays given")

        # run estimation
        try:
            super(StreamingTransformer, self).estimate(X, **kwargs)
        except NotConvergedWarning as ncw:
            self._logger.info(
                "Presumely finished estimation. Message: %s" % ncw)
        # memory mode? Then map all results. Avoid recursion here, if parametrization
        # is triggered from get_output
        if self.in_memory and not self._mapping_to_mem_active:
            self._map_to_memory()

        self._estimated = True

        return self
Project: 2048    Author: vhalis
def breed_new_generation(self, weights, scores=None):
        # Weights is a list of lists of numpy.ndarrays
        # Breed generation in a 'seed' competition format
        seeds = len(weights)
        partner_offset = -1 if self.polygamous else 0
        polygamy_offset = 0 if not self.polygamous else 1
        next_gen = [None for _ in xrange(self.generation_size)]
        for offspring_num in xrange(self.generation_size):
            idx = offspring_num % seeds
            if idx == 0:
                partner_offset += polygamy_offset
            pair_idx = (seeds - offspring_num - partner_offset) % seeds
            if pair_idx == idx:
                # Don't breed with self - use highest seed instead
                pair_idx = 0 if idx != 0 else 1
            if scores:
                next_gen[offspring_num] = self.breed_organisms(
                    weights[idx], weights[pair_idx],
                    scores[idx], scores[pair_idx]
                    )
            else:
                next_gen[offspring_num] = self.breed_organisms(
                    weights[idx], weights[pair_idx],
                    )
        return next_gen
Project: skutil    Author: tgsmith61591
def _as_numpy(*args):
    """Given an iterable (a 1d list, np.ndarray, pd.Series, 
    pd.DataFrame or H2OFrame), convert it into a 1d np.ndarray
    for further processing.

    Returns
    -------
    arrs : list
        Returns a list (of 1d np.ndarrays) of length==len(args)
    """
    def _single_as_numpy(x):
        if not isinstance(x, np.ndarray):
            # if an H2OFrame, just return the first col
            if isinstance(x, H2OFrame):
                # same as ..h2o.util.h2o_col_to_numpy, but
                # that causes circular dependency in imports.
                if not x.shape[1] == 1:
                    raise ValueError('must be 1d column')
                _1d = x[x.columns[0]].as_data_frame(use_pandas=True)
                return _1d[_1d.columns[0]].values
            elif is_iterable(x):
                return np.asarray(x)
            else:
                raise TypeError('cannot create numpy array out of type=%s' % type(x))
        else:
            return np.copy(x)

    arrs = [_single_as_numpy(i) for i in args]
    if len(arrs) == 1:
        arrs = arrs[0]

    return arrs
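
A minimal usage sketch (hypothetical inputs; _as_numpy is a private skutil helper, so this assumes it is being called from inside the package):

import numpy as np
import pandas as pd

a = [1, 2, 3]                    # a plain 1d list
b = pd.Series([4.0, 5.0, 6.0])   # a 1d pandas Series
arr = _as_numpy(a)               # one argument  -> a single 1d np.ndarray
arrs = _as_numpy(a, b)           # two arguments -> a list of two 1d np.ndarrays
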
Project: multi-gpu-keras-tf    Author: sallamander
def _load_data(self, nb_obs=None):
        """Load the dataset specified by self.name

        :param nb_obs: optional; int for the number of observations to retain
         from the training & testing sets; if None, retain the full training
         and testing sets
        :return: a tuple of 4 np.ndarrays (x_train, y_train, x_test, y_test)
        """

        dataset = getattr(keras.datasets, self.name)
        train_data, test_data = dataset.load_data()
        x_train, y_train = train_data[0] / 255., train_data[1]
        x_test, y_test = test_data[0] / 255., test_data[1]

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        if self.name == 'mnist':
            x_train = np.expand_dims(x_train, axis=-1)
            x_test = np.expand_dims(x_test, axis=-1)

        if nb_obs:
            x_train = x_train[:nb_obs]
            y_train = y_train[:nb_obs]

            x_test = x_test[:nb_obs]
            y_test = y_test[:nb_obs]

        return x_train, y_train, x_test, y_test
Project: cuvarbase    Author: johnh2o2
def run(self, data, memory=None, **kwargs):
        """
        Run the adjoint NFFT on a batch of data

        Parameters
        ----------
        data: list of tuples
            list of [(t, y, nf), ...] containing
            * ``t``: observation times
            * ``y``: observations
            * ``nf``: int, size of NFFT
        memory: optional
            Previously allocated memory (as returned by ``self.allocate``);
            if None, memory is allocated from ``data``.
        **kwargs

        Returns
        -------
        powers: list of np.ndarrays
            List of adjoint NFFTs

        """
        if not hasattr(self, 'prepared_functions') or \
            not all([func in self.prepared_functions
                     for func in self.function_names]):
            self._compile_and_prepare_functions(**kwargs)

        if memory is None:
            memory = self.allocate(data, **kwargs)

        nfft_kwargs = dict(block_size=self.block_size)
        nfft_kwargs.update(kwargs)

        results = [nfft_adjoint_async(mem, self.function_tuple,
                                      **nfft_kwargs)
                   for mem in memory]

        return results
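
A minimal sketch (hypothetical light curves; ``proc`` and the (t, y, nf) tuple layout are assumptions based on the docstring above) of building the ``data`` argument from np.ndarrays:

import numpy as np

rng = np.random.RandomState(42)
data = []
for n in (50, 80):                   # two hypothetical light curves
    t = np.sort(rng.rand(n))         # observation times
    y = np.cos(2 * np.pi * 10 * t)   # observations
    data.append((t, y, 64))          # 64: size of the NFFT
# results = proc.run(data)           # proc: an instance of this class (assumed)
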
Project: keras-gp    Author: alshedivat
def finetune(self, X, Y, batch_size=32, gp_n_iter=1, verbose=1):
        """Finetune the output GP layers assuming the network is pre-trained.

        Arguments:
        ----------
            X : np.ndarray or list of np.ndarrays
            Y : np.ndarray or list of np.ndarrays
            batch_size : uint (default: 32)
                Batch size used for data streaming through the network.
            gp_n_iter : uint (default: 1)
                Number of iterations for GP training.
            verbose : uint (default: 1)
                Verbosity mode, 0 or 1.
        """
        # Validate user data
        X = _standardize_input_data(
            X, self.input_names, self.internal_input_shapes,
            check_batch_axis=False)

        H = self.transform(X, batch_size=batch_size)

        if verbose:
            print("Finetuning output GPs...")

        for gp, h, y in zip(self.output_gp_layers, H, Y):
            # Update GP data (and grid if necessary)
            gp.backend.update_data('tr', h, y)
            if gp.update_grid:
                gp.backend.update_grid('tr')

            # Train GP
            gp.hyp = gp.backend.train(gp_n_iter, verbose=verbose)

        if verbose:
            print("Done.")
Project: keras-gp    Author: alshedivat
def evaluate(self, X, Y, batch_size=32, verbose=0):
        """Compute NLML on the given data.

        Arguments:
        ----------
            X : np.ndarray or list of np.ndarrays
            Y : np.ndarray or list of np.ndarrays
            batch_size : uint (default: 32)
            verbose : uint (default: 0)
                Verbosity mode, 0 or 1.

        Returns:
        --------
            nlml : float
        """
        # Validate user data
        X, Y, _ = self._standardize_user_data(
            X, Y,
            sample_weight=None,
            class_weight=None,
            check_batch_axis=False,
            batch_size=batch_size)

        H = self.transform(X, batch_size=batch_size)

        nlml = 0.
        for gp, h, y in zip(self.output_gp_layers, H, Y):
            nlml += gp.backend.evaluate('tmp', h, y)

        return nlml
Project: keras-gp    Author: alshedivat
def push(self, name, var):
        # Convert np.ndarrays into matlab.doubles and push into the workspace
        if type(var) is np.ndarray:
            self._eng.workspace[name] = self._matarray(var.tolist())
        elif type(var) is dict:
            var_copy = var.copy()
            for k, v in var_copy.iteritems():
                if type(v) is np.ndarray:
                    var_copy[k] = self._matarray(v.tolist())
            self._eng.workspace[name] = var_copy
        elif type(var) in {list, int, float}:
            self._eng.workspace[name] = var
        else:
            raise ValueError("Unknown type (%s) variable being pushed "
                             "into the MATLAB session." % type(var))
Project: GTSRB-caffe-model    Author: magnusja
def forward_pass(images, net, transformer, batch_size=1):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:,:,np.newaxis])
        else:
            caffe_images.append(image)

    caffe_images = np.array(caffe_images)

    dims = transformer.inputs['data'][1:]

    scores = None
    for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
        new_shape = (len(chunk),) + tuple(dims)
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        output = net.forward()[net.outputs[-1]]
        if scores is None:
            scores = np.copy(output)
        else:
            scores = np.vstack((scores, output))
        print 'Processed %s/%s images ...' % (len(scores), len(caffe_images))

    return scores
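
Stacking with np.vstack inside the loop copies the accumulated scores on every batch. A common alternative (a sketch, not part of the original project) collects each batch output and stacks once at the end:

import numpy as np

def collect_scores(batch_outputs):
    # batch_outputs: iterable of (nBatch x nClasses) np.ndarrays from net.forward()
    copies = [np.copy(out) for out in batch_outputs]  # copy before the blob is reused
    return np.vstack(copies)                          # single concatenation at the end
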
Project: Barebones-Flask-and-Caffe-Classifier    Author: alex-paterson
def forward_pass(images, net, transformer, batch_size=1):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:,:,np.newaxis])
        else:
            caffe_images.append(image)

    caffe_images = np.array(caffe_images)

    dims = transformer.inputs['data'][1:]

    scores = None
    for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
        new_shape = (len(chunk),) + tuple(dims)
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        output = net.forward()[net.outputs[-1]]
        if scores is None:
            scores = np.copy(output)
        else:
            scores = np.vstack((scores, output))
        print 'Processed %s/%s images ...' % (len(scores), len(caffe_images))

    return scores
Project: qudi    Author: Ulm-IQO
def set_data(self, x=None, y=None, clear_old=True):
        """Set the data to plot

        @param np.ndarray/list or list of np.ndarrays/lists x: data of independent variable(s)
        @param np.ndarray/list or list of np.ndarrays/lists y: data of dependent variable(s)
        @param bool clear_old: clear old plots in GUI if True
        """

        if x is None:
            self.log.error('No x-values provided, cannot set plot data.')
            return -1

        if y is None:
            self.log.error('No y-values provided, cannot set plot data.')
            return -1

        self.clear_old = clear_old
        # check if input is only an array (single plot) or a list of arrays (several plots)
        if len(x) == 1:
            self.indep_vals = [x]
            self.depen_vals = [y]
        else:
            self.indep_vals = x
            self.depen_vals = y

        self.sigPlotDataUpdated.emit()
        self.sigPlotParamsUpdated.emit()

        self.set_domain()
        self.set_range()
        return
Project: 2048    Author: vhalis
def save_generation(self, weights):
        # Weights is a list of lists of numpy.ndarrays
        for idx, weight_array in enumerate(weights):
            numpy.savez(self.get_save_file(idx), *weight_array)
Project: loompy    Author: linnarsson-lab
def __getitem__(self, slice: Tuple[Union[int, np.ndarray, slice], Union[int, np.ndarray, slice]]) -> np.ndarray:
        """
        Get a slice of the main matrix.
        Args:
            slice:      A 2D slice object (see http://docs.h5py.org/en/latest/high/dataset.html) or np.ndarrays or ints
        Returns:
            A numpy matrix
        """
        return self.layers[""][slice]
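
A minimal usage sketch (hypothetical .loom file): integer, slice and np.ndarray indices can be combined on the two axes:

import numpy as np
import loompy

ds = loompy.connect("example.loom")   # hypothetical file
rows = np.array([0, 2, 5])            # np.ndarray of row indices
sub = ds[rows, :]                     # -> np.ndarray with 3 rows
col = ds[:, 10]                       # a single column
ds.close()
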
Project: skutil    Author: tgsmith61591
def h2o_f_classif(X, feature_names, target_feature):
    """Compute the ANOVA F-value for the provided sample.
    This method is adapted from ``sklearn.feature_selection.f_classif``
    to function on H2OFrames.

    Parameters
    ----------

    X : ``H2OFrame``, shape=(n_samples, n_features)
        The feature matrix. Each feature will be tested 
        sequentially.

    feature_names : array_like (str)
        The list of feature names on which to fit the transformer.

    target_feature : str
        The name of the target feature (excluded from the fit)
        for the estimator.


    Returns
    -------

    f : float
        The computed F-value of the test.

    prob : float
        The associated p-value from the F-distribution.
    """
    frame = check_frame(X, copy=False)

    # first, get unique values of y
    y = X[target_feature]
    _, unq = _unq_vals_col(y)

    # if y is enum, make the unq strings..
    unq = unq[_] if not y.isfactor()[0] else [str(i) for i in unq[_]]

    # get the masks
    args = [frame[y == k, :][feature_names] for k in unq]
    f, prob = h2o_f_oneway(*args)
    return f, prob


# The following function is a rewriting (of the sklearn rewriting) of
# scipy.stats.f_oneway. Unlike the scipy.stats.f_oneway implementation,
# it does not copy the data while keeping the inputs unchanged. Furthermore,
# unlike the sklearn implementation, it does not use np.ndarrays; instead
# it operates on 1d H2OFrames in place.
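
For comparison, a minimal numpy-based sketch of the one-way ANOVA F statistic that comment refers to (an illustration on np.ndarrays, not skutil's h2o_f_oneway; it returns only F, without the p-value):

import numpy as np

def f_oneway_ndarrays(*groups):
    # groups: one 1d np.ndarray of observations per class
    groups = [np.asarray(g, dtype=float) for g in groups]
    n_total = sum(g.size for g in groups)
    grand_mean = np.concatenate(groups).mean()
    ss_between = sum(g.size * (g.mean() - grand_mean) ** 2 for g in groups)
    ss_within = sum(((g - g.mean()) ** 2).sum() for g in groups)
    return (ss_between / (len(groups) - 1)) / (ss_within / (n_total - len(groups)))
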
Project: keras-gp    Author: alshedivat
def predict(self, X, X_tr=None, Y_tr=None,
                batch_size=32, return_var=False, verbose=0):
        """Generate output predictions for the input samples batch by batch.

        Arguments:
        ----------
            X : np.ndarray or list of np.ndarrays
            X_tr, Y_tr : np.ndarray or list of np.ndarrays, optional
                Training data used to update the GP data (and grid) before
                prediction.
            batch_size : uint (default: 32)
            return_var : bool (default: False)
                Whether predictive variance is returned.
            verbose : uint (default: 0)
                Verbosity mode, 0 or 1.

        Returns:
        --------
            preds : a list or a tuple of lists
                Lists of output predictions and variance estimates.
        """
        # Update GP data if provided (and grid if necessary)
        if X_tr is not None and Y_tr is not None:
            X_tr, Y_tr, _ = self._standardize_user_data(
                X_tr, Y_tr,
                sample_weight=None,
                class_weight=None,
                check_batch_axis=False,
                batch_size=batch_size)
            H_tr = self.transform(X_tr, batch_size=batch_size)
            for gp, h, y in zip(self.output_gp_layers, H_tr, Y_tr):
                gp.backend.update_data('tr', h, y)
                if gp.update_grid:
                    gp.backend.update_grid('tr')

        # Validate user data
        X = _standardize_input_data(
            X, self.input_names, self.internal_input_shapes,
            check_batch_axis=False)

        H = self.transform(X, batch_size=batch_size)

        preds = []
        for gp, h in zip(self.output_gp_layers, H):
            preds.append(gp.backend.predict(h, return_var=return_var))

        if return_var:
            preds = map(list, zip(*preds))

        return preds


# Apply tweaks
Project: toothless    Author: ratt-ru
def forward_pass(images, net, transformer, batch_size=1):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:,:,np.newaxis])
        else:
            caffe_images.append(image)

    caffe_images = np.array(caffe_images)

    dims = transformer.inputs['data'][1:]

    scores = None
    for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
        new_shape = (len(chunk),) + tuple(dims)
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        output = net.forward()[net.outputs[-1]]
        if scores is None:
            scores = np.copy(output)
        else:
            scores = np.vstack((scores, output))
        #print 'Processed %s/%s images ...' % (len(scores), len(caffe_images))

    return scores

# Resolve labels
Project: 2048    Author: vhalis
def __init__(self,
                 hidden_sizes=DEFAULT_HIDDEN_SIZES,
                 weights=DEFAULT_WEIGHTS,
                 inputs=DEFAULT_INPUTS,
                 outputs=DEFAULT_OUTPUTS,
                 weight_spread=None,
                 weight_middle=None):
        """
        @hidden_sizes: An iterable of integers that describe the sizes of the
                       hidden layers of the Net.
        @weights: May be a function that returns arrays to use as weights.
                  If so, it must take an iterable of sizes to create weights
                  for and must return the same data as described below.
                  Otherwise it must be a sliceable of numpy.ndarrays of
                  dtype=float, with the proper sizes in the proper order.
        @inputs: The integer number of inputs.
        @outputs: The integer number of outputs.
        """
        if not isinstance(inputs, int) or not isinstance(outputs, int):
            raise ValueError('Number of inputs and outputs must be integers')
        if (not hasattr(hidden_sizes, '__iter__')
                or not all(isinstance(i, int) for i in hidden_sizes)):
            raise ValueError('Sizes of hidden layers must be integers'
                             ' provided in an iterable')

        self.sizes = tuple(chain((inputs,),
                                 hidden_sizes,
                                 (outputs,)))
        if weights and callable(weights):
            weights = weights(self.sizes)
        if (weights and (not hasattr(weights, '__getslice__')
                         or not all(isinstance(arr, numpy.ndarray)
                                     for arr in weights)
                         or not all(arr.dtype == float for arr in weights))):
            raise ValueError('Weights of hidden layers must be numpy.ndarrays'
                             ' with dtype=float provided in a sliceable')

        self.inputs = inputs
        self.outputs = outputs
        self.weights = weights or Net.random_weights(self.sizes,
                                                      weight_spread,
                                                      weight_middle)
        for idx, w in enumerate(self.weights):
            assert(w.shape == (self.sizes[idx], self.sizes[idx+1]))
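
A minimal sketch (an assumption of this page, not the project's actual Net.random_weights) of building such a sliceable of float ndarrays from the layer sizes:

import numpy

def random_weights(sizes, weight_spread=1.0, weight_middle=0.0):
    # one float ndarray of shape (sizes[i], sizes[i+1]) per consecutive pair of layers
    return [weight_middle + weight_spread * (numpy.random.rand(a, b) - 0.5)
            for a, b in zip(sizes[:-1], sizes[1:])]
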
Project: BirdAudioDetectionChallenge2016    Author: RSPB
def forward_pass(images, net, transformer, batch_size=None):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    if batch_size is None:
        batch_size = 1

    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:, :, np.newaxis])
        else:
            caffe_images.append(image)

    dims = transformer.inputs['data'][1:]

    scores = None
    for chunk in [caffe_images[x:x + batch_size] for x in range(0, len(caffe_images), batch_size)]:
        new_shape = (len(chunk),) + tuple(dims)
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        start = time.time()
        output = net.forward()[net.outputs[-1]]
        end = time.time()
        if scores is None:
            scores = np.copy(output)
        else:
            scores = np.vstack((scores, output))
        print('Processed %s/%s images in %f seconds ...' % (len(scores), len(caffe_images), (end - start)))

    return scores
Project: WasIstDasFuer1Drone    Author: magnusja
def forward_pass(images, net, transformer, batch_size=None):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    if batch_size is None:
        batch_size = 1

    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:,:,np.newaxis])
        else:
            caffe_images.append(image)

    dims = transformer.inputs['data'][1:]

    scores = None
    for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
        new_shape = (len(chunk),) + tuple(dims)
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        start = time.time()
        output = net.forward()[net.outputs[-1]]
        end = time.time()
        if scores is None:
            scores = np.copy(output)
        else:
            scores = np.vstack((scores, output))
        print 'Processed %s/%s images in %f seconds ...' % (len(scores), len(caffe_images), (end - start))

    return scores
Project: WasIstDasFuer1Drone    Author: magnusja
def forward_pass(images, net, transformer, batch_size=None):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    if batch_size is None:
        batch_size = 1

    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:,:,np.newaxis])
        else:
            caffe_images.append(image)

    dims = transformer.inputs['data'][1:]

    scores = None
    for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
        new_shape = (len(chunk),) + tuple(dims)
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        start = time.time()
        output = net.forward()[net.outputs[-1]]
        end = time.time()
        if scores is None:
            scores = np.copy(output)
        else:
            scores = np.vstack((scores, output))
        print 'Processed %s/%s images in %f seconds ...' % (len(scores), len(caffe_images), (end - start))

    return scores