Python numpy module: arrays() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how numpy arrays (np.arrays) are used.

Project: astrobase    Author: waqasbhatti    | project source | file source
def xmatch_basic(ra1, dec1, ra2, dec2, match_radius=5.0):
    '''
    This is a quick matcher that uses great_circle_dist to find the closest
    object in (ra2,dec2) within match_radius to (ra1,dec1). (ra1,dec1) must be a
    scalar pair, while (ra2,dec2) must be np.arrays of the same length.

    PARAMETERS:
    ra1/dec1: coordinates of the target to match
    ra2/dec2: coordinate np.arrays of the list of coordinates to match to

    RETURNS:

    A tuple like the following:

    (True -> matched or False -> no match,
     minimum distance between target and list)

    '''

    min_dist_arcsec = np.min(great_circle_dist(ra1,dec1,ra2,dec2))

    if (min_dist_arcsec < match_radius):
        return (True,min_dist_arcsec)
    else:
        return (False,min_dist_arcsec)
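
A minimal usage sketch (hypothetical coordinates; great_circle_dist, which this
function relies on, appears further down this page):

import numpy as np

ra1, dec1 = 150.0, 2.2                        # target (decimal degrees)
ra2 = np.array([149.9, 150.00001, 151.3])     # catalog to match against
dec2 = np.array([2.3, 2.20001, 2.1])

matched, min_dist = xmatch_basic(ra1, dec1, ra2, dec2, match_radius=5.0)
print(matched, min_dist)  # True: the closest object is within 5 arcsec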
Project: traffic_detection_yolo2    Author: wAuner    | project source | file source
def get_IOU(rec1, rec2):
    """
    rec1 and rec2 are both np.arrays with (x_center, y_center, width, height).
    Works with any number of leading dimensions as long as the last dimension is 4.
    """

    rec1_xy_max = rec1[..., :2] + (rec1[..., 2:4] - 1) / 2
    rec1_xy_min = rec1[..., :2] - (rec1[..., 2:4] - 1) / 2

    rec2_xy_max = rec2[..., :2] + (rec2[..., 2:4] - 1) / 2
    rec2_xy_min = rec2[..., :2] - (rec2[..., 2:4] - 1) / 2

    intersec_max = np.minimum(rec1_xy_max, rec2_xy_max)
    intersec_min = np.maximum(rec1_xy_min, rec2_xy_min)

    intersec_wh = np.maximum(intersec_max - intersec_min + 1, 0)

    intersec_area = intersec_wh[..., 0] * intersec_wh[..., 1]

    area1 = rec1[..., 2] * rec1[..., 3]
    area2 = rec2[..., 2] * rec2[..., 3]

    union = area1 + area2 - intersec_area

    return intersec_area / union
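
A minimal usage sketch (hypothetical boxes). With the (x_center, y_center,
width, height) convention above, two 8x8 boxes offset by 2 pixels overlap
with IOU 0.6:

import numpy as np

box_a = np.array([10.0, 10.0, 8.0, 8.0])
box_b = np.array([12.0, 10.0, 8.0, 8.0])
print(get_IOU(box_a, box_b))  # 0.6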
Project: piecewise    Author: DataDog    | project source | file source
def _preprocess(t, v):
    """ Raises and exception if any of the inputs are not valid.
    Otherwise, returns a list of Points, ordered by t.
    """
    # Validate the inputs.
    if len(t) != len(v):
        raise ValueError('`t` and `v` must have the same length.')
    t_arr, v_arr = np.array(t), np.array(v)
    if not np.all(np.isfinite(t)):
        raise ValueError('All values in `t` must be finite.')
    finite_mask = np.isfinite(v_arr)
    if np.sum(finite_mask) < 2:
        raise ValueError('`v` must have at least 2 finite values.')
    t_arr, v_arr = t_arr[finite_mask], v_arr[finite_mask]
    if len(np.unique(t_arr)) != len(t_arr):
        raise ValueError('All `t` values must be unique.')

    # Order both arrays by t-values.
    sort_order = np.argsort(t_arr)
    t_arr, v_arr = t_arr[sort_order], v_arr[sort_order]

    return t_arr, v_arr
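
A minimal usage sketch (toy data): the NaN sample is dropped and the
remaining points are returned sorted by t:

import numpy as np

t_arr, v_arr = _preprocess([3, 1, 2], [30.0, 10.0, np.nan])
print(t_arr, v_arr)  # [1 3] [10. 30.]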
Project: elfi    Author: elfi-dev    | project source | file source
def _limit_params(samples, selector=None):
    """Pick only the selected parameters from all samples.

    Parameters
    ----------
    samples : OrderedDict of np.arrays
    selector : iterable of ints or strings, optional
        Indices or keys to use from samples. Defaults to all.

    Returns
    -------
    selected : OrderedDict of np.arrays

    """
    if selector is None:
        return samples
    else:
        selected = OrderedDict()
        for ii, k in enumerate(samples):
            if ii in selector or k in selector:
                selected[k] = samples[k]
        return selected
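
A minimal usage sketch (hypothetical sample dict): selecting by key keeps
only the named parameters:

from collections import OrderedDict
import numpy as np

samples = OrderedDict([('mu', np.zeros(100)), ('sigma', np.ones(100))])
print(list(_limit_params(samples, selector=['mu']).keys()))  # ['mu']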
Project: keras-neural-graph-fingerprint    Author: keiserlab    | project source | file source
def to_config(self, jsonify=False):
        ''' Returns a dict that can be used to recreate the file efficiently

        # Arguments:
            jsonify (bool): If True, the dict will be jsonifiable (no `np.arrays`)

        # Returns:
            config (dict): that can be used in `SparseTensor.from_config`

        '''
        if jsonify:
            nonsparse_indices = [i.tolist() for i in self.nonsparse_indices]
            nonsparse_values = self.nonsparse_values.tolist()
        else:
            nonsparse_indices = self.nonsparse_indices
            nonsparse_values = self.nonsparse_values

        return dict(nonsparse_indices=nonsparse_indices, nonsparse_values=nonsparse_values,
                    default_value=self.default_value, dtype=str(self.dtype),
                    main_axis=self.main_axis, max_shape=self.max_shape,)
Project: theia    Author: bandang0    | project source | file source
def rotMatrix(a,b):
    '''Provides the rotation matrix which maps a (unit) to b (unit).

    a,b: unit 3D vectors. [3D np.arrays]

    Returns an np.array such that np.dot(M,a) == b.

    '''

    if np.abs(np.dot(a,b)) == 1.:
        return np.dot(a,b) *np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
                                    dtype=np.float64)

    v = np.cross(a,b)
    vx = np.array([[0., -v[2], v[1]], [v[2], 0., -v[0]], [-v[1], v[0], 0.]],
                  dtype=np.float64)

    return np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=np.float64)\
            + vx + (1.0/(1.0 + np.dot(a,b)))*np.dot(vx,vx)
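
A minimal usage sketch: the returned matrix rotates a onto b:

import numpy as np

a = np.array([1., 0., 0.])
b = np.array([0., 1., 0.])
M = rotMatrix(a, b)
print(np.allclose(np.dot(M, a), b))  # True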
Project: NetDetect    Author: ericzhao28    | project source | file source
def shuffle_twins(X, Y):
  '''
  Shuffle two np.arrays in parallel.
  Shuffles on axis=0.
  Args:
    - X (np.array)
    - Y (np.array)
  Return:
    - X_shuffled (np.array)
    - Y_shuffled (np.array)
  '''

  assert(X.shape[0] == Y.shape[0])

  rng_state = np.random.get_state()
  np.random.shuffle(X)
  np.random.set_state(rng_state)
  np.random.shuffle(Y)

  return X, Y
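
A minimal usage sketch: both arrays receive the same permutation, so rows
stay aligned with their labels:

import numpy as np

X = np.arange(10).reshape(5, 2)   # row i starts with 2*i
Y = np.arange(5)
X_s, Y_s = shuffle_twins(X, Y)
print(np.array_equal(X_s[:, 0] // 2, Y_s))  # True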
Project: picosdk-python-examples    Author: picotech    | project source | file source
def get_min_max_data(self, index, unlock=True):
        """ Returns contents of the requested buffer in the form of 2 np.arrays
        This call applies only to aggregated mode
        :param index: buffer index number
        :type index: int
        :param unlock: Whether to release buffer after the call
        :type unlock: bool
        :returns: status of the calls, data_min, data_max
        :rtype: tuple(int, np.array, np.array)
        """
        if self._handle <= 0:
            return pico_num("PICO_INVALID_HANDLE"), [], []
        if index not in self._buffers.keys():
            return pico_num("PICO_INVALID_BUFFER"), [], []
        if unlock:
            self.unlock_buffer(index)
        with self._buffers[index].access_lock:
            if self._buffers[index].data_min is not None:
                return pico_num("PICO_OK"), self._buffers[index].data_min, self._buffers[index].data
            else:
                return pico_num("PICO_OK"), self._buffers[index].data, self._buffers[index].data
Project: picosdk-python-examples    Author: picotech    | project source | file source
def get_ets_data(self, index, unlock=True):
        """ Returns contents of the requested buffer in the form of 2 numpy arrays: times, data
        :param index: buffer index number
        :type index: int
        :param unlock: Whether to release buffer after the call
        :type unlock: bool
        :returns: status of the call, times, data
        :rtype: tuple(int, np.array, np.array)
        """
        if self._handle <= 0:
            return pico_num("PICO_INVALID_HANDLE"), [], []
        if index not in self._buffers.keys() or self._ets.time is None:
            return pico_num("PICO_INVALID_BUFFER"), [], []
        if unlock:
            self.unlock_buffer(index)
        return pico_num("PICO_OK"), self._ets.time, self._buffers[index].data
Project: nept    Author: vandermeerlab    | project source | file source
def get_single_field(fields):
    """Finds neurons with and indices of single fields.

    Parameters
    ----------
    fields : dict
        Where the key is the neuron number (int), value is a list of arrays (int).
        Each inner array contains the indices for a given place field.
        Eg. Neurons 7, 3, 11 that have 2, 1, and 3 place fields respectively would be:
        {7: [[field], [field]], 3: [[field]], 11: [[field], [field], [field]]}

    Returns
    -------
    fields : dict
        Where the key is the neuron number (int), value is a list of arrays (int).
        Each inner array contains the indices for a given place field.
        Eg. For the above input, only neuron 3 would be output in this dict:
        {3: [[field]]}

    """
    fields_single = dict()
    for neuron in fields.keys():
        if len(fields[neuron]) == 1:
            fields_single[neuron] = fields[neuron]
    return fields_single
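
A minimal usage sketch (toy field dict): only the neuron with exactly one
place field survives:

fields = {7: [[1, 2], [8, 9]], 3: [[4, 5, 6]], 11: [[0], [2], [4]]}
print(get_single_field(fields))  # {3: [[4, 5, 6]]}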
Project: pyflux    Author: RJT1990    | project source | file source
def _get_scale_and_shape_sim(self, transformed_lvs):
        """ Obtains model scale, shape, skewness latent variables for
        a 2d array of simulations.

        Parameters
        ----------
        transformed_lvs : np.array
            Transformed latent variable vector (2d - with draws of each variable)

        Returns
        ----------
        - Tuple of np.arrays (each being scale, shape and skewness draws)
        """

        if self.scale is True:
            if self.shape is True:
                model_shape = self.latent_variables.z_list[-1].prior.transform(transformed_lvs[-1, :]) 
                model_scale = self.latent_variables.z_list[-2].prior.transform(transformed_lvs[-2, :])
            else:
                model_shape = np.zeros(transformed_lvs.shape[1])
                model_scale = self.latent_variables.z_list[-1].prior.transform(transformed_lvs[-1, :])
        else:
            model_scale = np.zeros(transformed_lvs.shape[1])
            model_shape = np.zeros(transformed_lvs.shape[1])

        if self.skewness is True:
            model_skewness = self.latent_variables.z_list[-3].prior.transform(transformed_lvs[-3, :])
        else:
            model_skewness = np.zeros(transformed_lvs.shape[1])

        return model_scale, model_shape, model_skewness
Project: pyflux    Author: RJT1990    | project source | file source
def _mean_prediction(self, mu, Y, h, t_z):
        """ Creates a h-step ahead mean prediction

        Parameters
        ----------
        mu : np.ndarray
            The past predicted values

        Y : np.ndarray
            The past data

        h : int
            How many steps ahead for the prediction

        t_z : np.ndarray
            A vector of (transformed) latent variables

        Returns
        ----------
        h-length vector of mean predictions
        """     

        # Create arrays to iterate over
        Y_exp = Y.copy()

        # Loop over h time periods          
        for t in range(0,h):

            if self.ar != 0:
                Y_exp_normalized = (Y_exp[-self.ar:][::-1] - self._norm_mean) / self._norm_std
                new_value = self.predict_new(np.append(1.0, Y_exp_normalized), self.latent_variables.get_z_values())

            else:  
                new_value = self.predict_new(np.array([1.0]), self.latent_variables.get_z_values())

            Y_exp = np.append(Y_exp, [self.link(new_value)])

        return Y_exp
Project: pyflux    Author: RJT1990    | project source | file source
def _mean_prediction(self, mu, Y, h, t_z):
        """ Creates a h-step ahead mean prediction

        Parameters
        ----------
        mu : np.ndarray
            The past predicted values

        Y : np.ndarray
            The past data

        h : int
            How many steps ahead for the prediction

        t_z : np.ndarray
            A vector of (transformed) latent variables

        Returns
        ----------
        h-length vector of mean predictions
        """     

        # Create arrays to iterate over
        Y_exp = Y.copy()

        # Loop over h time periods          
        for t in range(0,h):
            new_value = self.predict_new(Y_exp[-self.ar:][::-1], self.latent_variables.get_z_values())
            Y_exp = np.append(Y_exp, [self.link(new_value)])

        return Y_exp
Project: elfi    Author: elfi-dev    | project source | file source
def plot_marginals(samples, selector=None, bins=20, axes=None, **kwargs):
    """Plot marginal distributions for parameters.

    Parameters
    ----------
    samples : OrderedDict of np.arrays
    selector : iterable of ints or strings, optional
        Indices or keys to use from samples. Defaults to all.
    bins : int, optional
        Number of bins in histogram.
    axes : one or an iterable of plt.Axes, optional

    Returns
    -------
    axes : np.array of plt.Axes

    """
    samples = _limit_params(samples, selector)
    ncols = kwargs.pop('ncols', 5)
    kwargs['sharey'] = kwargs.get('sharey', True)
    shape = (max(1, round(len(samples) / ncols + 0.5)), min(len(samples), ncols))
    axes, kwargs = _create_axes(axes, shape, **kwargs)
    axes = axes.ravel()
    for ii, k in enumerate(samples.keys()):
        axes[ii].hist(samples[k], bins=bins, **kwargs)
        axes[ii].set_xlabel(k)

    return axes
Project: elfi    Author: elfi-dev    | project source | file source
def chi_squared(*simulated, observed):
    """Return Chi squared goodness of fit.

    Adjusts for differences in magnitude between dimensions.

    Parameters
    ----------
    simulated : np.arrays
    observed : tuple of np.arrays

    """
    simulated = np.column_stack(simulated)
    observed = np.column_stack(observed)
    d = np.sum((simulated - observed)**2. / observed, axis=1)
    return d
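
A minimal usage sketch (toy summaries): the stacked arrays yield one
chi-squared distance per row:

import numpy as np

simulated = (np.array([12., 18.]), np.array([4., 6.]))
observed = (np.array([10., 20.]), np.array([5., 5.]))
print(chi_squared(*simulated, observed=observed))  # [0.6 0.4]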
Project: elfi    Author: elfi-dev    | project source | file source
def samples_array(self):
        """Return the samples as an array.

        The columns are in the same order as in self.parameter_names.

        Returns
        -------
        2d np.array with one column per parameter

        """
        return np.column_stack(tuple(self.samples.values()))
Project: nuts-ml    Author: maet3608    | project source | file source
def __rrshift__(self, iterable):
        """
        Convert samples in iterable into mini-batches.

        Structure of the output depends on the fmt function used. If None,
        the output is a list of np.arrays.

        :param iterable iterable: Iterable over samples.
        :return: Mini-batches
        :rtype: list of np.array if fmt=None
        """
        prefetch = self.prefetch
        batch_gen = self._batch_generator(iter(iterable))
        return PrefetchIterator(batch_gen, prefetch) if prefetch else batch_gen
Project: pyDAEDALUS    Author: lcbb    | project source | file source
def intersect_lists(a, b):
    # in case `a` and `b` are np.arrays:
    if type(a) == np.ndarray:
        a = list(a.flatten())
    if type(b) == np.ndarray:
        b = list(b.flatten())

    # TODO: rewrite in a way that preserves the order they're seen in?

    thing = set(a).intersection(set(b))
    return sorted(list(thing))
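
A minimal usage sketch: np.array inputs are flattened to lists before the
set intersection, and the result comes back sorted:

import numpy as np

print(intersect_lists([3, 1, 2], np.array([2, 3, 4])))  # [2, 3]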
Project: pyDAEDALUS    Author: lcbb    | project source | file source
def find(iterable, val):
    # A close-enough match to matlab's `find` function.  Ideally, usage of
    # this function will be refactored out.

    # Sometimes, data comes in as an array.  Directly computing array equality
    # breaks, so arrays first need to be converted to iterables.
    # If arrays, it is assumed the iterable is two-dimensional and val is one.

    if type(iterable) == np.ndarray:
        iterable = [list(item) for item in iterable]
    if type(val) == np.ndarray:
        val = list(val)

    return [i for i in range(len(iterable)) if iterable[i] == val]
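
A minimal usage sketch: works on plain lists and, via the conversion above,
on a 2D np.array matched against a 1D row:

import numpy as np

print(find([1, 2, 1, 3], 1))                                # [0, 2]
print(find(np.array([[1, 2], [3, 4]]), np.array([3, 4])))   # [1]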
Project: fathom    Author: rdadolf    | project source | file source
def get_random_batch(self):
    """Get random batch from np.arrays (not tf.train.shuffle_batch)."""
    n_examples = self.train_spectrograms.shape[0]
    random_sample = np.random.randint(n_examples, size=self.batch_size)
    return self.train_spectrograms[random_sample, :, :], self.train_labels[random_sample, :], self.train_seq_lens[random_sample]
Project: autodiff    Author: bgavran    | project source | file source
def __init__(self, value, name=None):
        if name is None:
            name = str(value)  # this op is really slow for np.arrays?!
        super().__init__([], name)

        if isinstance(value, numbers.Number):
            self._value = np.array(value, dtype=np.float64)
        else:
            self._value = value
        self.shape = self._value.shape
Project: tcsl    Author: machinelearningnanodegree    | project source | file source
def trainCvSplit(df, cvSize=0.30, rs=21):
    """
    # Simple Train and CV split on the dataset
    # Input: df: Transformed DataFrame of the Adult Dataset,
    #        cvSize: size of the cross_validation set,
    #        rs: random_state used for the CV split. (helps reproduce results)
    # returns: Tuple of Four np.arrays - (XTrain, XTest, yTrain, yTest).
    """
    labels = df['income'].values
    features = df.drop(['income'], axis=1).values
    kwargs = {'test_size': cvSize, 'random_state': rs}
    return train_test_split(features, labels, **kwargs)
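
A minimal usage sketch (hypothetical toy frame; assumes train_test_split is
imported from scikit-learn, e.g. sklearn.model_selection):

import pandas as pd

df = pd.DataFrame({'age': [25, 32, 47, 51], 'income': [0, 1, 0, 1]})
XTrain, XTest, yTrain, yTest = trainCvSplit(df, cvSize=0.5, rs=21)
print(XTrain.shape, XTest.shape)  # (2, 1) (2, 1)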
Project: picosdk-python-examples    Author: picotech    | project source | file source
def get_min_max_volts(self, index, scale=1.0, unlock=True):
        """ Returns contents of the requested buffer in the form of 2 np.arrays
        This call applies only to aggregated mode
        :param index: buffer index number
        :type index: int
        :param scale: scale of the data on return
        :type scale: float
        :param unlock: Whether to release buffer after the call
        :type unlock: bool
        :returns: status of the calls, data_min, data_max
        :rtype: tuple(int, np.array, np.array)
        """
        if self._handle <= 0:
            return pico_num("PICO_INVALID_HANDLE"), [], []
        if index not in self._buffers.keys():
            return pico_num("PICO_INVALID_BUFFER"), [], []
        with self._buffers[index].access_lock:
            if self._buffers[index].data is None:
                return pico_num("PICO_OK"), [], []
        if unlock:
            self.unlock_buffer(index)
        with self._buffers[index].access_lock:
            factor = \
                scale * \
                (self.m.Ranges.values[self._channel_set[self._buffers[index].channel].range] / self.info.max_adc)

            if self._buffers[index].data_min is not None:
                return pico_num("PICO_OK"), \
                       self._buffers[index].data_min * factor, self._buffers[index].data * factor
            else:
                a = self._buffers[index].data * factor
                return pico_num("PICO_OK"), a, a
Project: picosdk-python-examples    Author: picotech    | project source | file source
def get_min_max_states(self, index, unlock=True):
        """ Returns contents of the requested buffer in the form of 2 multidimensional np.arrays
        This call applies only to aggregated mode
        :param index: buffer index number
        :type index: int
        :param unlock: Whether to release buffer after the call
        :type unlock: bool
        :returns: status of the calls, data_min, data_max
        :rtype: tuple(int, np.ndarray, np.ndarray)
        """
        if self._handle <= 0:
            return pico_num("PICO_INVALID_HANDLE"), [], []
        if index not in self._buffers.keys():
            return pico_num("PICO_INVALID_BUFFER"), [], []
        if unlock:
            self.unlock_buffer(index)
        with self._buffers[index].access_lock:
            if self._buffers[index].data is None:
                return pico_num("PICO_OK"), [], []
            if self._buffers[index].data_min is not None:
                return pico_num("PICO_OK"), np.array(
                    [self._buffers[index].data_min & (1 << b) for b in range(0, 8)], dtype=bool), np.array(
                    [self._buffers[index].data & (1 << b) for b in range(0, 8)], dtype=bool)
            else:
                a = np.array([self._buffers[index].data & (1 << b) for b in range(0, 8)], dtype=bool)
                return pico_num("PICO_OK"), a, a
Project: blues    Author: MobleyLab    | project source | file source
def _findDart(self, nc_context):
        """
        Helper function to dynamically update dart positions based on the current positions
        of the basis particles.
        Arguments
        ---------
        nc_context: Context object from simtk.openmm
            Context from the ncmc simulation.

        Returns
        -------
        dart_list : list of 1x3 np.arrays in units.nm
            new dart positions calculated from the particle_pairs
            and particle_weights.

        """

        basis_particles = self.basis_particles
        #make sure there's an equal number of particle pair lists
        #and particle weight lists
        dart_list = []
        state_info = nc_context.getState(True, True, False, True, True, False)
        temp_pos = state_info.getPositions(asNumpy=True)
        part1 = temp_pos[basis_particles[0]]
        part2 = temp_pos[basis_particles[1]]
        part3 = temp_pos[basis_particles[2]]
        for dart in self.n_dartboard:
            old_center = self._findOldCoord(part1, part2, part3, dart)
            dart_list.append(old_center)
        self.dartboard = dart_list[:]
        return dart_list
Project: blues    Author: MobleyLab    | project source | file source
def dist_from_dart_center(self, sim_atom_pos, binding_mode_atom_pos):

        num_lig_atoms = len(self.residueList)

        dist_list = np.zeros((num_lig_atoms, 1))
        diff_list = np.zeros((num_lig_atoms, 3))
        indexList = []
        #Find the distances of the center to each dart, appending
        #the results to dist_list
        #TODO change to handle np.arrays instead

        for index, dart in enumerate(binding_mode_atom_pos):
            diff = sim_atom_pos[index] - dart
            dist = np.sqrt(np.sum((diff)*(diff)))
#            dist = np.sqrt(np.sum((diff)*(diff)))*unit.nanometers
            print('binding_mode_atom_pos', binding_mode_atom_pos)
            print('sim_atom_pos', sim_atom_pos[index])
            print('dart', dart)
            print('diff', diff)
            diff_list[index] = diff
            dist_list[index] = dist
            print('diff_list', diff_list[index])
            print('dist_list', dist_list[index])


        return dist_list, diff_list
Project: blues    Author: MobleyLab    | project source | file source
def virtualDart(self, virtual_particles=None):
        """
        For dynamically updating dart positions based on positions
        of other particles.
        This takes the weighted average of the specified particles
        and changes the dartboard of the object

        Arguments
        ---------
        virtual_particles: list of ints
            Each int in the list specifies a particle
        particle_weights: list of list of floats
            each list defines the weights assigned to each particle positions
        Returns
        -------
        dart_list : list of 1x3 np.arrays in units.nm
            new dart positions calculated from the particle_pairs
            and particle_weights

        """
        if virtual_particles == None:
            virtual_particles = self.virtual_particles

        dart_list = []
        state_info = self.nc_context.getState(True, True, False, True, True, False)
        temp_pos = state_info.getPositions(asNumpy=True)
        #find virtual particles positions and add to dartboard
        for particle in virtual_particles:
            print('temp_pos', particle, temp_pos[particle])
            dart_list.append(temp_pos[particle])
        self.dartboard = dart_list[:]
        return dart_list
Project: blues    Author: MobleyLab    | project source | file source
def n_findDart(self, basis_particles=None):
        """
        Helper function to dynamically update dart positions based on positions
        of other particles.


        Arguments
        ---------
        basis_particles: list of 3 ints
            Specifies the 3 indices of particles whose coordinates will be used
            as basis vectors. If None is specified, uses those found in basis particles

        Returns
        -------
        dart_list : list of 1x3 np.arrays in units.nm
            new dart positions calculated from the particle_pairs
            and particle_weights

        """
        if basis_particles == None:
            basis_particles = self.basis_particles
        #make sure there's an equal number of particle pair lists
        #and particle weight lists
        dart_list = []
        state_info = self.nc_context.getState(True, True, False, True, True, False)
        temp_pos = state_info.getPositions(asNumpy=True)
        part1 = temp_pos[basis_particles[0]]
        part2 = temp_pos[basis_particles[1]]
        part3 = temp_pos[basis_particles[2]]
        print('n_findDart before dartboard', self.dartboard)
        for dart in self.n_dartboard:
            print('particles', part1, part2, part3)
            old_center = findOldCoord(part1, part2, part3, dart)
            dart_list.append(old_center)
        self.dartboard = dart_list[:]
        print('n_findDart dartboard', self.dartboard)
        return dart_list
Project: PBSuite    Author: dbrowneup    | project source | file source
def normalize(self, instances):
        """
        This works on numpy.arrays
        normalizes in place 
        """
        lower = -1.
        upper =  1

        ranges = self.ranges
        def getVal(a):
            """
            Mathematics for normalizing
            This is pretty slow.
            """
            ret = numpy.zeros(len(a))
            for pos,i in enumerate(a):
                if i == minimum:
                    ret[pos] = lower
                elif i == maximum:
                    ret[pos] = upper
                else:
                    ret[pos] = lower + (upper-lower) * (i - minimum) / (maximum - minimum)
            return ret

        for i in xrange(len(ranges.keys())):
            attribute = i + 1
            minimum, maximum = ranges[attribute]
            instances[i] = numpy.apply_along_axis(getVal, 0, instances[i])
Project: SyConn    Author: StructuralNeurobiologyLab    | project source | file source
def save_to_h5py(data, path, hdf5_names=None):
    """
    Saves data to h5py File

    Parameters
    ----------
    data: list of np.arrays
    path: str
    hdf5_names: list of str
        has to be the same length as data

    Returns
    -------
    nothing

    """
    if (not type(data) is dict) and hdf5_names is None:
        raise Exception("hdf5names has to be set, when data is a list")
    if os.path.isfile(path):
        os.remove(path)
    f = h5py.File(path, "w")
    if type(data) is dict:
        for key in data.keys():
            f.create_dataset(key, data=data[key],
                             compression="gzip")
    else:
        if len(hdf5_names) != len(data):
            f.close()
            raise Exception("Not enough or to much hdf5-names given!")
        for nb_data in range(len(data)):
            f.create_dataset(hdf5_names[nb_data], data=data[nb_data],
                             compression="gzip")
    f.close()
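
A minimal usage sketch (hypothetical /tmp paths; assumes h5py is available):

import numpy as np

save_to_h5py([np.zeros(3), np.ones(3)], '/tmp/example.h5',
             hdf5_names=['a', 'b'])
save_to_h5py({'a': np.zeros(3)}, '/tmp/example_dict.h5')  # dicts need no names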
Project: nept    Author: vandermeerlab    | project source | file source
def consecutive(array, stepsize=1):
    """

    Parameters
    ----------
    array : np.array

    Returns
    -------
    List of np.arrays, split when jump greater than stepsize

    """

    return np.split(array, np.where(np.diff(array) != stepsize)[0]+1)
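
A minimal usage sketch: runs are split wherever the jump differs from
stepsize:

import numpy as np

print(consecutive(np.array([0, 1, 2, 10, 11, 20])))
# [array([0, 1, 2]), array([10, 11]), array([20])]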
Project: opendeplete    Author: mit-crpg    | project source | file source
def total_density_list(self):
        """ Returns a list of total density lists.

        This list is in the exact same order as depletion_matrix_list, so that
        matrix exponentiation can be done easily.

        Returns
        -------
        list of numpy.array
            A list of np.arrays containing total atoms of each cell.
        """

        total_density = [self.number.get_mat_slice(i) for i in range(self.number.n_mat_burn)]

        return total_density
Project: KMMMs    Author: blt2114    | project source | file source
def __init__(self, length=5, pi_mag=1, K=None):
        """ initialize the motif

        The motif parameters are stored as deques of np.arrays so that it is
        easy to append and remove bases from the edges of the motifs.

        Args:
            length: the length of the motif, more precisely, the active
                component.
            pi_mag: the magnitude of the dirichlet prior of beta, pi. This
                prior acts as a pseudo-count on each position.
            K: we will want to scale down the number of aligned bases by K
                since we expect a factor of K more alignments when we are using
                overlapping K-mers.
        """
        self.motif_len = length

        # Position Weight Matrix will be stored as a deque of 1D np.arrays;
        # this representation is overparameterized, since each column
        # must lie on the 3-simplex, but keeping all four values explicit
        # allows for easier and more efficient manipulation
        pwm = np.zeros([length+2, len(tools.BASES)])
        self.beta = deque(pwm)
        self.beta_rc = deque(flipud(fliplr(np.array(self.beta))))
        Motif.update_rc(self) # initialize reverse complement pwm parameters

        # dirichlet prior on PWM, a uniform prior is placed over each
        # position of the PWM
        self.pi_dir = pi_mag*np.ones(shape=len(tools.BASES), dtype=np.float32)

        # we use this to store counts of each base observed in each of the
        # positions along the length of the motif as well as on the sides.
        self.eta = deque(np.zeros([length+2, len(tools.BASES)]))
        self.K = K
Project: KMMMs    Author: blt2114    | project source | file source
def __init__(self, length=4, pi_mag=100, min_len=4, max_len=10,
                 variational=False, tau=None, kappa=0, log_p_longer=-15,
                 verbose=False, f=None, K=None):
        """ initialize the motif

        The motif parameters are stored as deques of np.arrays so that it is
        easy to append and remove bases from the edges of the motifs.

        We may also update Dynamic motifs in the context of Variational
        inference.  Here, we simply choose the maximum-likelihood shape.

        Args:
            length: the length of the motif, more precisely, the active
                component.
            pi_mag: the magnitude of the dirichlet prior of beta, pi. This
                prior acts as a pseudo-count on each position.
            min_len: the minimum number of bases that can be in a motif.
            max_len: the largest number of bases a motif can be.
            variational: if we want to use the Variational Motif as our base
                class.
            tau: delay parameter for SVI updates.
            kappa: forget rate parameter for SVI updates.
            log_p_longer: log prior probability of motif being longer than its
                current length.  In practice, this must be a large value,
                especially if we have a large Kmer table.
            verbose: True if we want to log more often (for debugging)
            f: file handle to log to if verbose
        """
        assert length >= min_len and length <= max_len
        assert max_len%2 == 0 and length%2 == 0 and min_len%2 == 0
        DynamicMotif.__init__(self, length, pi_mag=pi_mag, min_len=min_len,
                              max_len=max_len, variational=variational, tau=tau,
                              kappa=kappa, log_p_longer=log_p_longer,
                              verbose=verbose, f=f, K=K)
Project: Accent-Classifier    Author: dwww2012    | project source | file source
def make_standard_length(filename, n_samps=240000):
    down_sig, rate = downsample(filename)
    normed_sig = librosa.util.fix_length(down_sig, n_samps)
    normed_sig = (normed_sig - np.mean(normed_sig)) / np.std(normed_sig)
    return normed_sig

# from a folder containing wav files, normalize each, divide into num_splits-1 chunks and write the resulting np.arrays to a single matrix
Project: astrobase    Author: waqasbhatti    | project source | file source
def great_circle_dist(ra1, dec1, ra2, dec2):
    '''
    This calculates the great circle angular distance in arcseconds between two
    coordinates (ra1,dec1) and (ra2,dec2). This is basically a clone of GCIRC
    from the IDL Astrolib.

    PARAMETERS:

    ra1,dec1: first coordinate (decimal degrees) -- scalar or np.array
    ra2,dec2: second coordinate (decimal degrees) -- scalar or np.array

    RETURNS:

    great circle distance between the two coordinates in arcseconds.

    if (ra1,dec1) scalar and (ra2,dec2) scalar: result is a scalar

    if (ra1,dec1) scalar and (ra2,dec2) np.array: result is np.array with
    distance between (ra1,dec1) and each element of (ra2,dec2)

    if (ra1,dec1) np.array and (ra2,dec2) scalar: result is np.array with
    distance between (ra2,dec2) and each element of (ra1,dec1)

    if (ra1,dec1) and (ra2,dec2) both np.arrays: result is np.array with
    pair-wise distance between each element of the two coordinate lists.

    If the input np.arrays are not the same length, then excess elements of the
    longer ones will be ignored.

    '''

    # wrap RA if negative or larger than 360.0 deg
    in_ra1 = ra1 % 360.0
    in_ra1 = in_ra1 + 360.0*(in_ra1 < 0.0)
    in_ra2 = ra2 % 360.0
    in_ra2 = in_ra2 + 360.0*(in_ra2 < 0.0)

    # convert to radians
    ra1_rad, dec1_rad = np.deg2rad(in_ra1), np.deg2rad(dec1)
    ra2_rad, dec2_rad = np.deg2rad(in_ra2), np.deg2rad(dec2)

    del_dec2 = (dec2_rad - dec1_rad)/2.0
    del_ra2 =  (ra2_rad - ra1_rad)/2.0
    sin_dist = np.sqrt(np.sin(del_dec2) * np.sin(del_dec2) + \
                           np.cos(dec1_rad) * np.cos(dec2_rad) * \
                           np.sin(del_ra2) * np.sin(del_ra2))

    dist_rad = 2.0 * np.arcsin(sin_dist)

    # return the distance in arcseconds
    return np.rad2deg(dist_rad)*3600.0
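
A minimal sanity check (hypothetical coordinates): one degree of declination
at fixed RA is 3600 arcseconds:

import numpy as np

print(great_circle_dist(10.0, 20.0, 10.0, 21.0))  # ~3600.0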
Project: astrobase    Author: waqasbhatti    | project source | file source
def xmatch_neighbors(ra1, dec1, ra2, dec2, match_radius=60.0,
                     includeself=False,sortresults=True):
    '''
    This is a quick matcher that uses great_circle_dist to find the closest
    neighbors in (ra2,dec2) within match_radius to (ra1,dec1). (ra1,dec1) must
    be a scalar pair, while (ra2,dec2) must be np.arrays of the same length.

    PARAMETERS:
    ra1/dec1: coordinates of the target to match

    ra2/dec2: coordinate np.arrays of the list of coordinates to match to

    includeself: if True, includes matches in list to self-coordinates

    sortresults: if True, returns match_index in order of increasing distance
    from target

    RETURNS:

    A tuple like the following:

    (True -> matched or False -> no match,
     minimum distance between target and list,
     np.array of indices where the list of coordinates is closer than
     match_radius to the target,
     np.array of the corresponding distances, sorted by increasing distance)

    '''

    dist = great_circle_dist(ra1,dec1,ra2,dec2)

    if includeself:
        match_dist_ind = np.where(dist < match_radius)

    else:
        # make sure we match only objects that are not the same as this object
        match_dist_ind = np.where((dist < match_radius) & (dist > 0.1))

    if len(match_dist_ind[0]) > 0:
        match_dists = dist[match_dist_ind]
        dist_sort_ind = np.argsort(match_dists)

        if sortresults:
            match_dist_ind = (match_dist_ind[0])[dist_sort_ind]

        min_dist = np.min(match_dists)

        return (True,min_dist,match_dist_ind,match_dists[dist_sort_ind])

    else:
        return (False,)


###################
## PROPER MOTION ##
###################
Project: traffic_detection_yolo2    Author: wAuner    | project source | file source
def get_batches(df, image_path, batch_size=16):
    """
    Takes in a dataframe and returns X,y as well as the mask
    """

    # group objects by images and ignore if more than one object per anchor
    grouped = df[df['pot_conflict'] == False].groupby('Frame')
    # list all image filenames
    fnames = np.array(os.listdir(image_path))
    fnames_w_obj = [name for name, _ in grouped]

    # create two lists of frame. labels frame contains training labels
    # mask_vals contains frames with grid coordinates for each image with objects
    labels = {img_name: frame[['xc_rel', 'yc_rel', 'w_train', 'h_train',
                               'Car', 'Pedestrian', 'Truck']].as_matrix() for img_name, frame in grouped}

    mask_vals = {img_name: frame[['y_grid_idx', 'x_grid_idx', 'resp_anchor']].as_matrix().T \
                 for img_name, frame in grouped}

    batches_per_epoch = len(fnames) // batch_size + 1
    for batch_idx in range(batches_per_epoch):
        # select random indices for each batch
        # random_indices = np.random.choice(np.arange(len(fnames)), size=batch_size, replace=False)

        fnames_batch = np.random.choice(fnames, size=batch_size, replace=False)
        # load all images for the batch
        X = np.array([plt.imread(image_path + file) for file in fnames_batch])

        # dim: batch_size * gridx *  gridy * n_anchors
        mask = np.zeros([batch_size, 13, 13, 5])
        # dim: batch_size * gridx *  gridy * n_anchors * (4 + n_classes)
        y = np.zeros([batch_size, 13, 13, 5, 7])

        for count, img_name in enumerate(fnames_batch):
            # checks if frame is in dataframe, is only there if image has objects
            if img_name in fnames_w_obj:
                # handles all objects in an image at once by indexing with arrays
                # create boolean mask
                m0, m1, m2 = mask_vals[img_name]
                mask[count, m0, m1, m2] = 1
                # create labels
                y[count, m0, m1, m2] = labels[img_name]

        # make mask boolean
        mask = mask > 0

        yield X, y, mask
Project: esys-pbi    Author: fsxfreak    | project source | file source
def recent_events(self, events):
        frame = events.get('frame')
        if self.active and frame:
            recent_pupil_positions = events['pupil_positions']
            gray_img = frame.gray

            if self.clicks_to_close <=0:
                self.stop()
                return

            # detect the marker
            self.markers = find_concetric_circles(gray_img, min_ring_count=4)

            if len(self.markers) > 0:
                self.detected = True
                marker_pos = self.markers[0][0][0]  # first marker, innermost ellipse, center
                self.pos = normalize(marker_pos, (frame.width, frame.height), flip_y=True)

            else:
                self.detected = False
                self.pos = None  # indicate that no reference is detected

            # only save a valid ref position if within sample window of calibration routine
            on_position = self.lead_in < self.screen_marker_state < (self.lead_in+self.sample_duration)

            if on_position and self.detected:
                ref = {}
                ref["norm_pos"] = self.pos
                ref["screen_pos"] = marker_pos
                ref["timestamp"] = frame.timestamp
                self.ref_list.append(ref)

            # always save pupil positions
            for p_pt in recent_pupil_positions:
                if p_pt['confidence'] > self.pupil_confidence_threshold:
                    self.pupil_list.append(p_pt)

            if on_position and self.detected and events.get('fixations', []):
                self.screen_marker_state = min(
                    self.sample_duration+self.lead_in,
                    self.screen_marker_state+self.fixation_boost)

            # Animate the screen marker
            if self.screen_marker_state < self.sample_duration+self.lead_in+self.lead_out:
                if self.detected or not on_position:
                    self.screen_marker_state += 1
            else:
                self.screen_marker_state = 0
                if not self.sites:
                    self.stop()
                    return
                self.active_site = self.sites.pop(0)
                logger.debug("Moving screen marker to site at {} {}".format(*self.active_site))

            # use np.arrays for per element wise math
            self.display_pos = np.array(self.active_site)
            self.on_position = on_position
            self.button.status_text = '{} / {}'.format(self.active_site, 9)
Project: snn4hrl    Author: florensacc    | project source | file source
def rollout_snn(env, agent, max_path_length=np.inf, reset_start_rollout=True,
                switch_lat_every=0, animated=False, speedup=1):
    """
    :param reset_start_rollout: whether to reset at the start of every rollout
    :param switch_lat_every: potential change in latents (by resetting the agent with forced resample lat)
    """
    observations = []
    actions = []
    rewards = []
    agent_infos = []
    env_infos = []
    if reset_start_rollout:
        o = env.reset()  # in case rollout is called to produce parts of a trajectory: otherwise it will never advance!!
    else:
        if isinstance(env, NormalizedEnv):
            o = env.wrapped_env.get_current_obs()
        else:
            o = env.get_current_obs()
    agent.reset()  # this resamples a latent in SNNs!
    path_length = 0
    if animated:
        env.render()
    while path_length < max_path_length:
        if switch_lat_every > 0 and path_length % switch_lat_every == 0:
            agent.reset(force_resample_lat=True)  # here forced to resample a latent
        a, agent_info = agent.get_action(o)
        next_o, r, d, env_info = env.step(a)
        observations.append(env.observation_space.flatten(o))
        rewards.append(r)
        actions.append(env.action_space.flatten(a))
        agent_infos.append(agent_info)
        env_infos.append(env_info)
        path_length += 1
        if d:
            break
        o = next_o
        if animated:
            env.render()
            timestep = 0.05
            time.sleep(timestep / speedup)

    return dict(
        observations=tensor_utils.stack_tensor_list(observations),
        actions=tensor_utils.stack_tensor_list(actions),
        rewards=tensor_utils.stack_tensor_list(rewards),
        agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),
        env_infos=tensor_utils.stack_tensor_dict_list(env_infos),  # here it concatenates all lower-level paths!
        #  So all elements are np.arrays of max_path_length x time_steps_agg x corresp_dim,
        #  hence the next concatenation done by the sampler at the higher level doesn't work
        #  because of the mismatched dim (1 and not 0)!!
    )
Project: pyflux    Author: RJT1990    | project source | file source
def _mean_prediction(self, mu, Y, h, t_z, X_oos):
        """ Creates a h-step ahead mean prediction

        This function is used for predict(). We have to iterate over the number
        of timepoints (h) that the user wants to predict, using as inputs the ARIMA
        parameters, past datapoints, past predicted datapoints, and assumptions about
        future exogenous variables (X_oos).

        Parameters
        ----------
        mu : np.array
            The past predicted values

        Y : np.array
            The past data

        h : int
            How many steps ahead for the prediction

        t_z : np.array
            A vector of (transformed) latent variables

        X_oos : np.array
            Out of sample X data

        Returns
        ----------
        h-length vector of mean predictions
        """     

        # Create arrays to iterate over
        Y_exp = Y.copy()
        mu_exp = mu.copy()

        # Loop over h time periods          
        for t in range(0,h):
            new_value = 0
            if self.ar != 0:
                for j in range(0, self.ar):
                    new_value += t_z[j]*Y_exp[-j-1]

            if self.ma != 0:
                for k in range(0, self.ma):
                    if k >= t:
                        new_value += t_z[k+self.ar]*(Y_exp[-k-1]-mu_exp[-k-1])

            # X terms
            new_value += np.matmul(X_oos[t,:],t_z[self.ma+self.ar:(self.ma+self.ar+len(self.X_names))])            

            if self.model_name2 == "Exponential":
                Y_exp = np.append(Y_exp, [1.0/self.link(new_value)])
            else:
                Y_exp = np.append(Y_exp, [self.link(new_value)])

            mu_exp = np.append(mu_exp, [0]) # For indexing consistency

        return Y_exp
Project: pyflux    Author: RJT1990    | project source | file source
def _mean_prediction(self, mu, Y, h, t_z):
        """ Creates a h-step ahead mean prediction

        This function is used for predict(). We have to iterate over the number
        of timepoints (h) that the user wants to predict, using as inputs the ARIMA
        parameters, past datapoints, and past predicted datapoints.

        Parameters
        ----------
        mu : np.ndarray
            The past predicted values

        Y : np.ndarray
            The past data

        h : int
            How many steps ahead for the prediction

        t_z : np.ndarray
            A vector of (transformed) latent variables

        Returns
        ----------
        h-length np.array of mean predictions
        """     

        # Create arrays to iterate over
        Y_exp = Y.copy()
        mu_exp = mu.copy()

        # Loop over h time periods          
        for t in range(0,h):
            new_value = t_z[0]

            if self.ar != 0:
                for j in range(1, self.ar+1):
                    new_value += t_z[j]*Y_exp[-j]

            if self.ma != 0:
                for k in range(1, self.ma+1):
                    if (k-1) >= t:
                        new_value += t_z[k+self.ar]*(Y_exp[-k]-self.link(mu_exp[-k]))

            if self.model_name2 == "Exponential":
                Y_exp = np.append(Y_exp, [1.0/self.link(new_value)])
            else:
                Y_exp = np.append(Y_exp, [self.link(new_value)])

            mu_exp = np.append(mu_exp,[0]) # For indexing consistency

        return Y_exp