Python numpy module: atleast_2d() code examples

The following code examples, collected from open-source Python projects, illustrate how to use numpy.atleast_2d().
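
As a quick standalone refresher before the project excerpts: numpy.atleast_2d() promotes scalars and 1-D arrays to 2-D while returning anything that is already 2-D (or higher) unchanged.

import numpy as np

print(np.atleast_2d(3.0).shape)          # (1, 1): a scalar becomes a 1x1 array
print(np.atleast_2d([1, 2, 3]).shape)    # (1, 3): a 1-D array becomes a row vector
print(np.atleast_2d(np.eye(2)).shape)    # (2, 2): 2-D input is returned unchanged
print(np.atleast_2d([1, 2, 3]).T.shape)  # (3, 1): transposing yields a column vector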

Project: pycpd    Author: siavashk
def __init__(self, X, Y, R=None, t=None, s=None, sigma2=None, maxIterations=100, tolerance=0.001, w=0):
    if X.shape[1] != Y.shape[1]:
      raise ValueError('Both point clouds must have the same number of dimensions!')

    self.X             = X
    self.Y             = Y
    self.TY            = Y
    (self.N, self.D)   = self.X.shape
    (self.M, _)        = self.Y.shape
    self.R             = np.eye(self.D) if R is None else R
    self.t             = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
    self.s             = 1 if s is None else s
    self.sigma2        = sigma2
    self.iteration     = 0
    self.maxIterations = maxIterations
    self.tolerance     = tolerance
    self.w             = w
    self.q             = 0
    self.err           = 0
Project: pycpd    Author: siavashk
def __init__(self, X, Y, B=None, t=None, sigma2=None, maxIterations=100, tolerance=0.001, w=0):
    if X.shape[1] != Y.shape[1]:
        raise ValueError('Both point clouds must have the same number of dimensions!')

    self.X             = X
    self.Y             = Y
    self.TY            = Y
    (self.N, self.D)   = self.X.shape
    (self.M, _)        = self.Y.shape
    self.B             = np.eye(self.D) if B is None else B
    self.t             = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
    self.sigma2        = sigma2
    self.iteration     = 0
    self.maxIterations = maxIterations
    self.tolerance     = tolerance
    self.w             = w
    self.q             = 0
    self.err           = 0
Project: sound_field_analysis-py    Author: QULab
def prepare_2D_traces(data, viz_type=None, fs=None, line_names=None):
    data = _np.atleast_2d(data)
    N, L = data.shape

    x = prepare_2D_x(L, viz_type, fs)

    traces = [None] * N

    for k in range(0, N):
        traces[k] = go.Scatter(
            x=x,
            y=data[k]
        )
        try:
            traces[k].name = line_names[k]
        except TypeError:
            pass

    return traces
Project: sound_field_analysis-py    Author: QULab
def whiteNoise(fftData, noiseLevel=80):
    '''Adds white Gaussian noise of approx. 16 dB crest to an FFT block.

    Parameters
    ----------
    fftData : array of complex floats
       Input fftData block (e.g. from F/D/T or S/W/G)
    noiseLevel : int, optional
       Average noise level in dB [Default: 80]

    Returns
    -------
    noisyData : array of complex floats
       Output fftData block including the white Gaussian noise
    '''
    dimFactor = 10**(noiseLevel / 20)
    fftData = _np.atleast_2d(fftData)
    channels = fftData.shape[0]
    NFFT = fftData.shape[1] * 2 - 2
    nNoise = _np.random.rand(channels, NFFT)
    nNoise = dimFactor * nNoise / _np.mean(_np.abs(nNoise))
    nNoiseSpectrum = _np.fft.rfft(nNoise, axis=1)
    return fftData + nNoiseSpectrum
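
A hypothetical usage sketch for the function above, assuming numpy is imported as _np as in this module and whiteNoise is in scope; the -60 dB level is just an illustrative choice:

import numpy as _np

# two channels, 65 rfft bins, i.e. NFFT = 65 * 2 - 2 = 128 time-domain samples
spectrum = _np.fft.rfft(_np.random.randn(2, 128), axis=1)
noisy = whiteNoise(spectrum, noiseLevel=-60)
print(noisy.shape)  # (2, 65): same shape as the input block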
Project: circletracking    Author: caspervdw
def get_slice(coords, shape, radius):
    """Returns the slice and origin that belong to ``slice_image``"""
    # interpret parameters
    ndim = len(shape)
    radius = validate_tuple(radius, ndim)
    coords = np.atleast_2d(np.round(coords).astype(int))
    # drop features that have no pixels inside the image
    in_bounds = np.array([(coords[:, i] >= -r) & (coords[:, i] < sh + r)
                         for i, sh, r in zip(range(ndim), shape, radius)])
    coords = coords[np.all(in_bounds, axis=0)]
    # return if no coordinates are left
    if len(coords) == 0:
        return [slice(None, 0)] * ndim, None
    # calculate the box
    lower = coords.min(axis=0) - radius
    upper = coords.max(axis=0) + radius + 1
    # calculate the slices
    origin = [None] * ndim
    slices = [None] * ndim
    for i, sh, low, up in zip(range(ndim), shape, lower, upper):
        lower_bound_trunc = max(0, low)
        upper_bound_trunc = min(sh, up)
        slices[i] = slice(lower_bound_trunc, upper_bound_trunc)
        origin[i] = lower_bound_trunc
    return slices, origin
Project: nnmnkwii    Author: r9y9
def _transform_frame(self, src):
        """Mapping source spectral feature x to target spectral feature y
        so that minimize the mean least squared error.
        More specifically, it returns the value E(p(y|x)].

        Args:
            src (array): the source speaker's spectral feature, of shape
                (`order of spectral feature`,), to be transformed

        Returns:
            array: converted spectral feature
        """
        D = len(src)

        # Eq.(11)
        E = np.zeros((self.num_mixtures, D))
        for m in range(self.num_mixtures):
            xx = np.linalg.solve(self.covarXX[m], src - self.src_means[m])
            E[m] = self.tgt_means[m] + self.covarYX[m].dot(xx)

        # Eq.(9) p(m|x)
        posterior = self.px.predict_proba(np.atleast_2d(src))

        # Eq.(13) conditional mean E[p(y|x)]
        return posterior.dot(E).flatten()
Project: nnmnkwii    Author: r9y9
def __init__(self,
                 file_data_source):
        self.file_data_source = file_data_source
        collected_files = self.file_data_source.collect_files()

        # Multiple files
        if isinstance(collected_files, tuple):
            collected_files = np.asarray(collected_files).T
            lengths = np.array([len(files) for files in collected_files])
            if not (lengths == lengths[0]).all():
                raise RuntimeError(
                    """Mismatch of number of collected files {}.
You must collect the same number of files when collecting multiple pairs of files.""".format(
                        tuple(lengths)))
        else:
            collected_files = np.atleast_2d(collected_files).T
        if len(collected_files) == 0:
            warn("No files are collected. You might have specified wrong data source.")

        self.collected_files = collected_files
Project: tensorpac    Author: EtienneCmb
def _check_freq(f):
    """Check the frequency definition."""
    f = np.atleast_2d(np.asarray(f))
    if len(f.reshape(-1)) == 1:
        raise ValueError("The length of f should at least be 2.")
    elif 2 in f.shape:  # f of shape (N, 2) or (2, N)
        if f.shape[1] != 2:
            f = f.T
    elif np.squeeze(f).shape == (4,):  # (fstart, fend, fwidth, fstep)
        f = _pair_vectors(*tuple(np.squeeze(f)))
    else:  # Sequential
        f = f.reshape(-1)
        f.sort()
        f = np.c_[f[0:-1], f[1::]]
    return f
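
A hedged usage sketch for the function above (the four-element (fstart, fend, fwidth, fstep) form would call the `_pair_vectors` helper, which is not shown here, so this only exercises the other branches):

import numpy as np

# a sequential vector of band edges becomes consecutive (start, end) pairs
print(_check_freq([1, 3, 5, 7, 9]))
# [[1 3]
#  [3 5]
#  [5 7]
#  [7 9]]

# a (2, N) array is transposed so the result has shape (N, 2)
print(_check_freq(np.array([[1, 2, 3], [4, 5, 6]])).shape)  # (3, 2)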
Project: wendy    Author: jobovy
def potential(y,x,v,m,twopiG=1.,omega=None):
    """
    NAME:
       potential
    PURPOSE:
       compute the gravitational potential at a set of points
    INPUT:
       y - positions at which to compute the potential
       x - positions of N-body particles [N]
       v - velocities of N-body particles [N]
       m - masses of N-body particles [N]
       twopiG= (1.) value of 2 \pi G
       omega= (None) if set, frequency of external harmonic oscillator
    OUTPUT:
       potential(y)
    HISTORY:
       2017-05-12 - Written - Bovy (UofT/CCA)
    """
    if omega is not None:
        out= omega**2.*y**2./2.
    else:
        out= 0.
    return out\
        +twopiG\
        *numpy.sum(m*numpy.fabs(x-numpy.atleast_2d(y).T),axis=1)
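A minimal usage sketch, assuming numpy is imported as in this module: numpy.atleast_2d(y).T turns the evaluation points into a column, so x minus that column broadcasts to a (len(y), N) distance matrix that is then summed over the particles.

import numpy

x = numpy.array([-1.0, 0.0, 1.0])  # particle positions
v = numpy.zeros_like(x)            # velocities (unused by potential itself)
m = numpy.ones_like(x)             # unit masses
y = numpy.array([0.0, 2.0])        # points at which to evaluate the potential

print(potential(y, x, v, m))       # [2. 6.], i.e. sum_i m_i * |x_i - y| per point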
Project: pymoskito    Author: cklb
def _control(self, time, trajectory_values=None, feedforward_values=None,
                 input_values=None, **kwargs):
        # input abbreviations
        x = input_values
        yd = trajectory_values
        eq = kwargs.get("eq", None)

        if eq is None:
            eq = calc_closest_eq_state(self._settings, input_values)
        x = x - np.atleast_2d(eq).T

        # this is a second version
        # x = calc_small_signal_state(self._settings, is_values)

        # u corresponds to a force [kg*m/s**2] = [N]
        u = - np.dot(self.K, x) + np.dot(self.V, yd[0, 0])

        return u
Project: 3D-R2N2    Author: chrischoy
def sparse_to_dense(voxel_data, dims, dtype=bool):
    if voxel_data.ndim != 2 or voxel_data.shape[0] != 3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims] * 3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(int)
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:, valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out

# def get_linear_index(x, y, z, dims):
# """ Assuming xzy order. (y increasing fastest.
# TODO ensure this is right when dims are not all same
# """
# return x*(dims[1]*dims[2]) + z*dims[1] + y
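
A minimal usage sketch for the function above: np.atleast_2d(dims).T makes dims a (3, 1) column that broadcasts against the 3xN coordinate array when discarding out-of-range voxels.

import numpy as np

# three voxels as a 3xN array of (x, y, z) indices in a 4x4x4 grid
voxel_data = np.array([[0, 1, 3],
                       [0, 2, 3],
                       [0, 3, 5]])   # the third voxel (z=5) is out of range
dense = sparse_to_dense(voxel_data, 4)
print(dense.shape, dense.sum())      # (4, 4, 4) 2: the invalid voxel was dropped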
Project: MDT    Author: cbclab
def sort_eigensystem(parameters_dict):
        eigenvectors = np.stack(tensor_spherical_to_cartesian(np.squeeze(parameters_dict['theta']),
                                                              np.squeeze(parameters_dict['phi']),
                                                              np.squeeze(parameters_dict['psi'])), axis=0)

        eigenvalues = np.atleast_2d(np.squeeze(np.dstack([parameters_dict['d'],
                                                          parameters_dict['dperp0'],
                                                          parameters_dict['dperp1']])))

        ranking = np.atleast_2d(np.squeeze(np.argsort(eigenvalues, axis=1, kind='mergesort')[:, ::-1]))
        voxels_range = np.arange(ranking.shape[0])
        sorted_eigenvalues = np.concatenate([eigenvalues[voxels_range, ranking[:, ind], None]
                                             for ind in range(ranking.shape[1])], axis=1)
        sorted_eigenvectors = np.stack([eigenvectors[ranking[:, ind], voxels_range, :]
                                        for ind in range(ranking.shape[1])])

        return sorted_eigenvalues, sorted_eigenvectors, ranking
Project: pySTATIS    Author: mfalkiewicz
def test_RvPCA():
    data = get_wine_data()
    for d in data:
        d.cross_product()

    n_datasets = len(data)

    expected_output = np.array([[0.10360263],
                                [0.10363524],
                                [0.09208477],
                                [0.10370834],
                                [0.08063234],
                                [0.09907428],
                                [0.09353886],
                                [0.08881811],
                                [0.1110871],
                                [0.12381833]])
    output, _, _ = rv_pca(data, n_datasets)

    np.testing.assert_array_almost_equal(np.atleast_2d(output).T, expected_output)
Project: elfi    Author: elfi-dev
def distance_as_discrepancy(dist, *summaries, observed):
    """Evaluate a distance function with signature `dist(summaries, observed)` in ELFI."""
    summaries = np.column_stack(summaries)
    # Ensure observed are 2d
    observed = np.concatenate([np.atleast_2d(o) for o in observed], axis=1)
    try:
        d = dist(summaries, observed)
    except ValueError as e:
        raise ValueError('Incompatible data shape for the distance node. Please check '
                         'summary (XA) and observed (XB) output data dimensions. They '
                         'have to be at most 2d. Especially ensure that summary nodes '
                         'output 2d data even with batch_size=1. Original error message '
                         'was: {}'.format(e))
    if d.ndim == 2 and d.shape[1] == 1:
        d = d.reshape(-1)
    return d
Project: elfi    Author: elfi-dev
def autocov(x, lag=1):
    """Return the autocovariance.

    Assumes a (weak) univariate stationary process with mean 0.
    Realizations are in rows.

    Parameters
    ----------
    x : np.array of size (n, m)
    lag : int, optional

    Returns
    -------
    C : np.array of size (n,)

    """
    x = np.atleast_2d(x)
    # In R this is normalized with x.shape[1]
    C = np.mean(x[:, lag:] * x[:, :-lag], axis=1)
    return C
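
A quick usage sketch for the function above: white-noise rows have lag-1 autocovariance near zero, while constant rows give back exactly their squared values.

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(3, 1000)               # three realizations of zero-mean white noise
print(autocov(x, lag=1))             # three values, all close to 0

y = np.tile(rng.randn(3, 1), 1000)   # perfectly persistent (constant) rows
print(autocov(y, lag=1))             # exactly the squared row values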
Project: SynthText    Author: ankush-me
def plane2xyz(center, ij, plane):
        """
        converts image pixel indices to xyz on the PLANE.

        center : 2-tuple
        ij : nx2 int array
        plane : 4-tuple

        return nx3 array.
        """
        ij = np.atleast_2d(ij)
        n = ij.shape[0]
        ij = ij.astype('float')
        xy_ray = (ij-center[None,:]) / DepthCamera.f
        z = -plane[2]/(xy_ray.dot(plane[:2])+plane[3])
        xyz = np.c_[xy_ray, np.ones(n)] * z[:,None]
        return xyz
Project: BAG_framework    Author: ucb-art
def _normalize_inputs(self, xi):
        """Normalize the inputs."""
        xi = np.asarray(xi, dtype=float)
        if xi.shape[-1] != self.ndim:
            raise ValueError("The requested sample points xi have dimension %d, "
                             "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim))

        xi = np.atleast_2d(xi.copy())
        for idx, (offset, scale) in enumerate(self._scale_list):
            xi[..., idx] -= offset
            xi[..., idx] /= scale

        # take extension input account.
        xi += self._ext

        return xi
Project: pumpp    Author: bmcfee
def inverse(self, encoded, duration=None):
        '''Inverse transformation'''

        ann = jams.Annotation(namespace=self.namespace, duration=duration)
        for start, end, value in self.decode_intervals(encoded,
                                                       duration=duration):
            # Map start:end to frames
            f_start, f_end = time_to_frames([start, end],
                                            sr=self.sr,
                                            hop_length=self.hop_length)

            confidence = np.mean(encoded[f_start:f_end+1, value])

            value_dec = self.encoder.inverse_transform(np.atleast_2d(value))[0]

            for vd in value_dec:
                ann.append(time=start,
                           duration=end-start,
                           value=vd,
                           confidence=confidence)

        return ann
Project: pumpp    Author: bmcfee
def inverse(self, encoded, duration=None):
        '''Inverse static tag transformation'''

        ann = jams.Annotation(namespace=self.namespace, duration=duration)

        if np.isrealobj(encoded):
            detected = (encoded >= 0.5)
        else:
            detected = encoded

        for vd in self.encoder.inverse_transform(np.atleast_2d(detected))[0]:
            vid = np.flatnonzero(self.encoder.transform(np.atleast_2d(vd)))
            ann.append(time=0,
                       duration=duration,
                       value=vd,
                       confidence=encoded[vid])
        return ann
Project: smp_base    Author: x75
def perf_pi_continuous(self, x):
        # Use history length 1 (Schreiber k=1), kernel width of 0.5 normalised units
        # learnerReward.piCalcC.initialise(40, 1, 0.5);
        # learnerReward.piCalcC.initialise(1, 1, 0.5);
        # src = np.atleast_2d(x[0:-1]).T # start to end - 1
        # dst = np.atleast_2d(x[1:]).T # 1 to end
        # learnerReward.piCalcC.setObservations(src, dst)

        # print "perf_pi_continuous", x
        # learnerReward.piCalcC.initialise(100, 1);
        # learnerReward.piCalcC.initialise(50, 1);
        learnerReward.piCalcC.initialise(10, 1);
        # src = np.atleast_2d(x).T # start to end - 1
        # learnerReward.piCalcC.setObservations(src.reshape((src.shape[0],)))
        # print "x", x.shape
        learnerReward.piCalcC.setObservations(x)
        # print type(src), type(dst)
        # print src.shape, dst.shape
        return learnerReward.piCalcC.computeAverageLocalOfObservations()# * -1
Project: 3D-IWGAN    Author: EdwardSmith1884
def sparse_to_dense(voxel_data, dims, dtype=bool):
    if voxel_data.ndim!=2 or voxel_data.shape[0]!=3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims]*3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(int)
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:,valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out

#def get_linear_index(x, y, z, dims):
    #""" Assuming xzy order. (y increasing fastest.
    #TODO ensure this is right when dims are not all same
    #"""
    #return x*(dims[1]*dims[2]) + z*dims[1] + y
Project: gullikson-scripts    Author: kgullikson88
def __init__(self, wls, fls, sigmas, masks=None, orders='all', name=None):
        self.wls = np.atleast_2d(wls)
        self.fls = np.atleast_2d(fls)
        self.sigmas = np.atleast_2d(sigmas)
        self.masks = np.atleast_2d(masks) if masks is not None else np.ones_like(self.wls, dtype='b')

        self.shape = self.wls.shape
        assert self.fls.shape == self.shape, "flux array incompatible shape."
        assert self.sigmas.shape == self.shape, "sigma array incompatible shape."
        assert self.masks.shape == self.shape, "mask array incompatible shape."

        if orders != 'all':
            # can either be a numpy array or a list
            orders = np.array(orders)  #just to make sure
            self.wls = self.wls[orders]
            self.fls = self.fls[orders]
            self.sigmas = self.sigmas[orders]
            self.masks = self.masks[orders]
            self.shape = self.wls.shape
            self.orders = orders
        else:
            self.orders = np.arange(self.shape[0])

        self.name = name
Project: mriqc    Author: poldracklab
def _run_interface(self, runtime):
        out_file = op.abspath(self.inputs.out_file)
        self._results['out_file'] = out_file

        spikes_list = np.loadtxt(self.inputs.in_spikes, dtype=int).tolist()
        # No spikes
        if not spikes_list:
            with open(out_file, 'w') as f:
                f.write('<p>No high-frequency spikes were found in this dataset</p>')
            return runtime

        spikes_list = [tuple(i) for i in np.atleast_2d(spikes_list).tolist()]
        plot_spikes(
            self.inputs.in_file, self.inputs.in_fft, spikes_list,
            out_file=out_file)
        return runtime
Project: eqnet    Author: mast-group
def get_knn_score_for(tree, k=5):
        tree = tree_copy_with_start(tree)
        tree_encoding = encoder.get_encoding([None, tree])  # This makes sure that token-based things fail
        tree_str_rep = str(tree)

        distances = cdist(np.atleast_2d(tree_encoding), encodings, 'cosine')
        knns = np.argsort(distances)[0]

        num_non_identical_nns = 0
        sum_equiv_nns = 0
        current_i = 0
        while num_non_identical_nns < k and current_i < len(knns) and eq_class_counts[
            tree.symbol] - 1 > num_non_identical_nns:
            expression_idx = knns[current_i]
            current_i += 1
            if eq_class_idx_to_names[expression_data[expression_idx]['eq_class']] == tree.symbol and str(
                    expression_data[expression_idx]['tree']) == tree_str_rep:
                continue  # This is an identical expression, move on
            num_non_identical_nns += 1
            if eq_class_idx_to_names[expression_data[expression_idx]['eq_class']] == tree.symbol:
                sum_equiv_nns += 1
        return "(%s-nn-stat: %s)" % (k, sum_equiv_nns / k)
Project: MLLearning    Author: buptdjd
def fit(self, X, y, learning_rate=0.2, epochs=10000):
        x = np.atleast_2d(X)
        # add bias to X
        temp = np.ones((x.shape[0], x.shape[1]+1))
        temp[:, 0:-1] = x
        x = temp
        y = np.array(y)
        for k in range(epochs):
            # randomly select one sample
            i = np.random.randint(x.shape[0])
            a = [x[i]]

            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            error = y[i] - a[-1]
            deltas = [error*self.activation_derivative(a[-1])]

            for l in range(len(a)-2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_derivative(a[l]))
            deltas.reverse()
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)
Project: casiopeia    Author: adbuerger
def __initialize_simulation(self, x0, time_points, udata, \
        integrator_options_user):

        self.__x0 = inputchecks.check_states_data(x0, self.__system.nx, 0)

        time_points = inputchecks.check_time_points_input(time_points)
        number_of_integration_steps = time_points.size - 1
        time_steps = time_points[1:] - time_points[:-1]

        udata = inputchecks.check_controls_data(udata, self.__system.nu, \
            number_of_integration_steps)

        self.__simulation_input = ci.vertcat([np.atleast_2d(time_steps), udata])

        integrator_options = integrator_options_user.copy()
        integrator_options.update({"t0": 0, "tf": 1, "expand": True}) # ,  "number_of_finite_elements": 1})
        # integrator = ci.Integrator("integrator", "rk", \
        integrator = ci.Integrator("integrator", "cvodes", \
            self.__dae_scaled, integrator_options)

        self.__simulation = integrator.mapaccum("simulation", \
            number_of_integration_steps)
Project: casiopeia    Author: adbuerger
def __compute_continuity_constraints(self):

        integrator = ci.Integrator("integrator", "rk", self.__ode_scaled, {"t0": 0, "tf": 1, "expand": True})

        params = ci.vertcat([np.atleast_2d(self.time_points[1:] - self.time_points[:-1]), \
            self.optimization_variables["U"], \
            ci.repmat(self.optimization_variables["Q"], 1, self.number_of_intervals), \
            ci.repmat(self.optimization_variables["P"], 1, self.number_of_intervals), \
            self.optimization_variables["EPS_U"]])

        shooting = integrator.map("shooting", "openmp", self.number_of_intervals)
        X_next = shooting(x0 = self.optimization_variables["X"][:,:-1], \
            p = params)["xf"]

        self.__continuity_constraints = \
            self.optimization_variables["X"][:, 1:] - X_next
Project: casiopeia    Author: adbuerger
def check_controls_data(udata, nu, number_of_controls):

    if not nu == 0:

        if udata is None:
            udata = np.zeros((nu, number_of_controls))

        udata = np.atleast_2d(udata)

        if udata.shape == (number_of_controls, nu):
            udata = udata.T

        if not udata.shape == (nu, number_of_controls):

            raise ValueError( \
                "Time-varying control values provided by user have wrong dimension.")

        return udata

    else:

        return ci.dmatrix(0, number_of_controls)
Project: casiopeia    Author: adbuerger
def check_constant_controls_data(qdata, nq):

    if not nq == 0:

        if qdata is None:
            qdata = np.zeros((nq, 1))

        qdata = np.atleast_2d(qdata)

        if qdata.shape == (1, nq):
            qdata = qdata.T

        if not qdata.shape == (nq, 1):

            raise ValueError( \
                "Time-constant control values provided by user have wrong dimension.")

        return qdata

    else:

        return ci.dmatrix(0, 1)
Project: casiopeia    Author: adbuerger
def check_states_data(xdata, nx, number_of_intervals):

    if not nx == 0:

        if xdata is None:
            xdata = np.zeros((nx, number_of_intervals + 1))

        xdata = np.atleast_2d(xdata)

        if xdata.shape == (number_of_intervals + 1, nx):
            xdata = xdata.T

        if not xdata.shape == (nx, number_of_intervals + 1):

            raise ValueError( \
                "State values provided by user have wrong dimension.")

        return xdata

    else:

        return ci.dmatrix(0,0)
Project: casiopeia    Author: adbuerger
def check_measurement_data(ydata, nphi, number_of_measurements):

    if ydata is None:
        ydata = np.zeros((nphi, number_of_measurements))

    ydata = np.atleast_2d(ydata)

    if ydata.shape == (number_of_measurements, nphi):
        ydata = ydata.T

    if not ydata.shape == (nphi, number_of_measurements):

        raise ValueError( \
            "Measurement data provided by user has wrong dimension.")

    return ydata
Project: casiopeia    Author: adbuerger
def check_measurement_weightings(wv, nphi, number_of_measurements):

    if wv is None:
        wv = np.ones((nphi, number_of_measurements))

    wv = np.atleast_2d(wv)

    if wv.shape == (number_of_measurements, nphi):
        wv = wv.T

    if not wv.shape == (nphi, number_of_measurements):

        raise ValueError( \
            "Measurement weightings provided by user have wrong dimension.")

    return wv
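
All of these check_* helpers follow one pattern: np.atleast_2d() lifts scalars, lists, and 1-D arrays to 2-D, and a transpose accepts data passed in the flipped orientation. A hedged sketch of that pattern, with a hypothetical helper name:

import numpy as np

def as_rows(data, nrows, ncols):
    # hypothetical distillation of the check_* pattern above
    data = np.atleast_2d(data)
    if data.shape == (ncols, nrows):
        data = data.T
    if data.shape != (nrows, ncols):
        raise ValueError("Data provided by user has wrong dimension.")
    return data

print(as_rows([1.0, 2.0, 3.0], 1, 3).shape)   # (1, 3): 1-D input lifted to 2-D
print(as_rows(np.zeros((5, 2)), 2, 5).shape)  # (2, 5): transposed input accepted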
Project: coordinates    Author: markovmodel
def _reshape(self, array):
        """
        Checks shapes and converts arrays to 2d; raises if not possible.
        After the checks pass, returns the reshaped array.
        """

        if array.ndim == 1:
            array = np.atleast_2d(array).T
        elif array.ndim == 2:
            pass
        else:
            shape = array.shape
            # hold first dimension, multiply the rest
            shape_2d = (shape[0],
                        functools.reduce(lambda x, y: x * y, shape[1:]))
            array = np.reshape(array, shape_2d)
        return array
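
The same rules as a standalone sketch (a hypothetical function, not the class method itself): 1-D input becomes a column, and anything above 2-D keeps its first axis and flattens the rest.

import functools

import numpy as np

def to_2d(array):
    # hypothetical standalone version of the _reshape logic above
    if array.ndim == 1:
        array = np.atleast_2d(array).T
    elif array.ndim > 2:
        shape = array.shape
        shape_2d = (shape[0], functools.reduce(lambda x, y: x * y, shape[1:]))
        array = np.reshape(array, shape_2d)
    return array

print(to_2d(np.arange(4)).shape)          # (4, 1): a column, not a row
print(to_2d(np.zeros((10, 3, 7))).shape)  # (10, 21)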
Project: coordinates    Author: markovmodel
def _add_array_to_storage(self, array):
        """
        Checks shapes and converts arrays to 2d; raises if not possible.
        After the checks pass, appends the array to self.data.
        """
        if array.ndim == 1:
            array = np.atleast_2d(array).T
        elif array.ndim == 2:
            pass
        else:
            shape = array.shape
            # hold first dimension, multiply the rest
            shape_2d = (shape[0], functools.reduce(lambda x, y: x * y, shape[1:]))
            array = np.reshape(array, shape_2d)

        self.data.append(array)
Project: machineLearning    Author: zhangtianle
def fit(self, x, y, learningRate=0.2, epochs=10000):
        x = np.atleast_2d(x)
        temp = np.ones([x.shape[0], x.shape[1]+1])
        temp[:, 0:-1] = x
        x = temp

        for k in range(epochs):
            i = np.random.randint(x.shape[0])
            result = [x[i]]
            for l in range(len(self._weights)):
                result.append(self._activation(np.dot(result[l], self._weights[l])))
            error = y[i] - result[-1]
            deltas = [error * self._activationDeriv(result[-1])]

            for l in range(len(self._weights)-1, 0, -1):
                deltas.append(np.dot(self._weights[l], deltas[-1]) * self._activationDeriv(result[l]))
                # deltas.append(deltas[-1].dot(self._weights[l].T) * self._activationDeriv(result[l]))
            deltas.reverse()

            for i in range(len(self._weights)):
                layer = np.atleast_2d(result[i])
                delta = np.atleast_2d(deltas[i])
                self._weights[i] += learningRate * layer.T.dot(delta)
Project: pycpd    Author: siavashk
def register(self, callback):
    self.initialize()

    while self.iteration < self.maxIterations and self.err > self.tolerance:
      self.iterate()
      if callback:
        callback(iteration=self.iteration, error=self.err, X=self.X, Y=self.TY)

    return self.TY, self.R, np.atleast_2d(self.t), self.s
Project: pycpd    Author: siavashk
def register(self, callback):
    self.initialize()

    while self.iteration < self.maxIterations and self.err > self.tolerance:
      self.iterate()
      if callback:
        callback(iteration=self.iteration, error=self.err, X=self.X, Y=self.TY)

    return self.TY, self.B, np.atleast_2d(self.t)
Project: pycpd    Author: siavashk
def __init__(self, Y, R=None, t=None, maxIterations=100, gamma=0.1, ):
    if Y is None:
      raise ValueError('Empty list of point clouds!')

    dimensions = [cloud.shape[1] for cloud in Y]

    if not all(dimension == dimensions[0] for dimension in dimensions):
      raise ValueError('All point clouds must have the same number of dimensions!')

    self.Y = Y
    self.M = [cloud.shape[0] for cloud in self.Y]
    self.D = dimensions[0]

    if R:
      rotations = [rotation.shape for rotation in R]
      if not all(rotation[0] == self.D and rotation[1] == self.D for rotation in rotations):
        raise ValueError('All rotation matrices need to be %d x %d matrices!' % (self.D, self.D))
      self.R = R
    else:
      self.R = [np.eye(self.D) for cloud in Y]

    if t:
      translations = [translation.shape for translation in t]
      if not all(translation[0] == 1 and translation[1] == self.D for translation in translations):
        raise ValueError('All translation vectors need to be 1 x %d matrices!' % self.D)
      self.t = t
    else:
      self.t = [np.atleast_2d(np.zeros((1, self.D))) for cloud in self.Y]
Project: lung-cancer-detector    Author: YichenGong
def plot_prediction(x_test, y_test, prediction, save=False):
    import matplotlib
    import matplotlib.pyplot as plt

    test_size = x_test.shape[0]
    fig, ax = plt.subplots(test_size, 3, figsize=(12,12), sharey=True, sharex=True)

    x_test = crop_to_shape(x_test, prediction.shape)
    y_test = crop_to_shape(y_test, prediction.shape)

    ax = np.atleast_2d(ax)
    for i in range(test_size):
        cax = ax[i, 0].imshow(x_test[i])
        plt.colorbar(cax, ax=ax[i,0])
        cax = ax[i, 1].imshow(y_test[i, ..., 1])
        plt.colorbar(cax, ax=ax[i,1])
        pred = prediction[i, ..., 1]
        pred -= np.amin(pred)
        pred /= np.amax(pred)
        cax = ax[i, 2].imshow(pred)
        plt.colorbar(cax, ax=ax[i,2])
        if i==0:
            ax[i, 0].set_title("x")
            ax[i, 1].set_title("y")
            ax[i, 2].set_title("pred")
    fig.tight_layout()

    if save:
        fig.savefig(save)
    else:
        fig.show()
        plt.show()
Project: mbin    Author: fanglab
def process_contig_chunk( args ):
    chunk_id      = args[0]
    control_pkl   = args[1]
    cut_CMDs      = args[2]
    kmers         = args[3]
    cols_chunk    = args[4]
    contig_id     = args[5]
    n_chunks      = args[6]
    n_contigs     = args[7]
    opts          = args[8]
    logging.info("  - Contig %s/%s: chunk %s/%s" % ((contig_id+1), n_contigs, (chunk_id+1), (n_chunks+1)))
    control_means = pickle.load(open(control_pkl, "rb"))
    contig_motifs = {}
    case_motif_Ns = {}

    for cut_CMD in cut_CMDs:
        sts,stdOutErr = mbin.run_OS_command( cut_CMD )

    fns                = [x.split("> ")[-1] for x in cut_CMDs]
    contig_ipds_sub    = np.loadtxt(fns[0], dtype="float")
    contig_ipds_N_sub  = np.loadtxt(fns[1], dtype="int")
    # If there is only one row (read) for this contig, still treat as
    # a 2d matrix of many reads
    contig_ipds_sub    = np.atleast_2d(contig_ipds_sub)
    contig_ipds_N_sub  = np.atleast_2d(contig_ipds_N_sub)
    for j in range(len(cols_chunk)):
        motif = kmers[cols_chunk[j]]
        case_contig_Ns    = contig_ipds_N_sub[:,j]
        if control_means.get(motif):
            case_contig_means = contig_ipds_sub[:,j]
            if np.sum(case_contig_Ns)>0:
                case_mean = np.dot(case_contig_means, case_contig_Ns) / np.sum(case_contig_Ns)
            else:
                case_mean = 0
            score                = case_mean - control_means[motif]
            contig_motifs[motif] = score
            case_motif_Ns[motif] = np.sum(case_contig_Ns)
    return contig_motifs,case_motif_Ns
Project: mbin    Author: fanglab
def process_contig_chunk( args ):
    chunk_id        = args[0]
    cut_CMDs        = args[1]
    kmers           = args[2]
    cols_chunk      = args[3]
    n_chunks        = args[4]
    min_motif_count = args[5]
    logging.info("  - Control data: chunk %s/%s" % ((chunk_id+1), (n_chunks+1)))
    control_means = {}

    for cut_CMD in cut_CMDs:
        sts,stdOutErr = mbin.run_OS_command( cut_CMD )

    fns                = [x.split("> ")[-1] for x in cut_CMDs]
    control_ipds_sub   = np.loadtxt(fns[0], dtype="float")
    control_ipds_N_sub = np.loadtxt(fns[1], dtype="int")
    # If there is only one row (read) for this contig, still treat as
    # a 2d matrix of many reads
    control_ipds_sub   = np.atleast_2d(control_ipds_sub)
    control_ipds_N_sub = np.atleast_2d(control_ipds_N_sub)

    not_found     = 0
    for j in range(len(cols_chunk)):
        motif = kmers[cols_chunk[j]]
        if np.sum(control_ipds_N_sub[:,j])>=min_motif_count:
            if np.sum(control_ipds_N_sub[:,j])>0:
                control_mean = np.dot(control_ipds_sub[:,j], control_ipds_N_sub[:,j]) / np.sum(control_ipds_N_sub[:,j])
            else:
                control_mean = 0
            control_means[motif] = control_mean
        else:
            not_found += 1

    return control_means,not_found
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def computeSMatrix(self):
        for m in range(self.n_tasks):
            task_X = self.task_dict[m]['X']
            task_Y = self.task_dict[m]['Y']
            task_xi = np.array(self.xi[m])

            for k in range(self.K):
                # Note that transposes are different because we are using different notation than in the paper - specifically we use row vectors where they are using column vectors

                # This does all data points (n) at once 
                inner = np.dot(np.atleast_2d(self.theta[k,:]).T, np.atleast_2d(self.theta[k,:])) + self.gamma[k]
                diag_entries = np.einsum('ij,ij->i', np.dot(task_X, inner), task_X)
                s_sum = -rhoFunction(task_xi)*diag_entries

                s_sum += ((task_Y.T - 0.5)* np.dot(np.atleast_2d(self.theta[k,:]), task_X.T))[0,:]
                s_sum += np.log(sigmoid(task_xi))
                s_sum += (-0.5)*task_xi
                s_sum += rhoFunction(task_xi)*(task_xi**2)

                s_sum = np.sum(s_sum)

                if k < self.K-1:
                    s_sum = s_sum + scipy.special.psi(self.small_phi1[k]) \
                                    - scipy.special.psi(self.small_phi1[k] + self.small_phi2[k])
                if k > 0:
                    for i in range(k):
                        s_sum = s_sum + scipy.special.psi(self.small_phi2[i]) \
                                    - scipy.special.psi(self.small_phi1[i] + self.small_phi2[i])


                self.s[m,k] = s_sum
        if self.debug: print("s:", self.s)
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def updatePhi(self):
        a = np.array([np.max(self.s, axis=1)]).T #as used in logsumexp trick https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
        self.phi = np.exp(self.s - (a + np.log(np.atleast_2d(np.sum(np.exp(self.s - a),axis=1)).T)))
        if self.debug:
            print("phi:", self.phi)
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def updateTheta(self):
        for k in range(self.K):
            inner_sum = np.zeros((1,self.num_feats))
            for m in range(self.n_tasks):
                inner_sum = inner_sum + self.phi[m,k] * np.atleast_2d(self.task_vectors[m,:])
            self.theta[k,:] = (np.dot(self.gamma[k],(np.dot(la.inv(self.sigma),self.mu.T) + inner_sum.T)  )).T
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def computeXi(self):
        for m in range(self.n_tasks):
            task_X = self.task_dict[m]['X']
            for n in range(len(task_X)):
                inner_sum = 0
                for k in range(self.K):
                    # Note that transposes are different because we are using different notation than in the paper - specifically we use row vectors where they are using column vectors
                    inner_sum += self.phi[m,k]*np.dot((np.dot(np.atleast_2d(task_X[n,:]), 
                                                        (np.dot(np.atleast_2d(self.theta[k,:]).T, np.atleast_2d(self.theta[k,:])) + self.gamma[k]))),
                                                        np.atleast_2d(task_X[n,:]).T)
                assert inner_sum >= 0           # This number can't be negative since we are taking the square root

                self.xi[m][n] = np.sqrt(inner_sum[0,0])
                if self.xi[m][n]==0:
                    print(m, n)
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def predictProbability(self, task, X):
        prob = 0
        for k in range(self.K):
            numerator = np.dot(np.atleast_2d(self.theta[k,:]),X.T)
            diag_entries = np.einsum('ij,ij->i', np.dot(X, self.gamma[k]), X) ##
            denom = np.sqrt(1.0 + np.pi/8 * diag_entries)
            prob = prob + self.phi[task,k] * sigmoid(numerator / denom)
        return prob


    # Code for Predicting for a new task
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def dataProb(self,new_task_X,new_task_y,weights):
        prod = 1
        for i in range(len(new_task_X)):
            sig = sigmoid(np.dot(weights,np.atleast_2d(new_task_X[i,:]).T ))
            prod = prod*(sig**new_task_y[i]) * (1.0-sig)**(1-new_task_y[i])

        return prod
Project: PersonalizedMultitaskLearning    Author: mitmedialab
def predictNewTask(self,new_task_X,new_task_y,pred_X,N_sam=1000):
        w_dot_array = self.metropolisHastingsAlgorithm(new_task_X,new_task_y,N_sam)

        predictions = []
        for x_star in pred_X:
            predictions.append(sum([sigmoid(np.dot(w,np.atleast_2d(x_star).T))[0,0] for w in w_dot_array])/float(N_sam))
        predictions = [1.0 if p>=0.5 else 0.0 for p in predictions]
        return predictions


# Helper function
Project: Machine-Learning    Author: grasses
def fit(self, X, y, learning_rate = 0.2, epochs = 10000):
        X = np.atleast_2d(X)
        # temp.shape = (X.shape[0], X.shape[1] + 1); the extra column is for the bias, so X[*][-1] = 1 => numpy.dot(x, weights) + 1 * bias
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        '''
        loop over `epochs` training iterations
        '''
        for k in range(epochs):
            # select a random line from X for training
            i = np.random.randint(X.shape[0])
            x = [X[i]]

            # going forward network, for each layer
            for l in range(len(self.weights)):
                # compute the node values for each layer (O_i) using the activation function
                x.append(self.activation(np.dot(x[l], self.weights[l])))

            # compute the error at the top layer
            error = y[i] - x[-1]
            deltas = [error * self.activation_deriv(x[-1])]  # For output layer, Err calculation (delta is updated error)

            # start backprobagation
            for l in range(len(x) - 2, 0, -1):  # we need to begin at the second to last layer
                # compute the updated error (i.e., deltas) for each node going from the top layer to the input layer
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(x[l]))
            deltas.reverse()

            for i in range(len(self.weights)):
                layer = np.atleast_2d(x[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)