Python numpy module: newaxis usage examples

We extracted the following code examples from open-source Python projects; they illustrate how to use numpy.newaxis.
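
Before the examples, a quick note on the mechanics: numpy.newaxis is simply an alias for None, and indexing with it inserts a length-1 axis. That one idiom, usually combined with broadcasting, drives every snippet below. A minimal sketch:

import numpy as np

a = np.arange(3)            # shape (3,)
col = a[:, np.newaxis]      # shape (3, 1): a column vector
row = a[np.newaxis, :]      # shape (1, 3): a row vector
outer = col + row           # broadcasts to shape (3, 3), an "outer sum"
assert outer.shape == (3, 3)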

Project: FCN_train    Author: 315386775    | Project source | File source
def preprocess(image):
    """Takes an image and apply preprocess"""
    # ????????????
    image = cv2.resize(image, (data_shape, data_shape))
    # ?? BGR ? RGB
    image = image[:, :, (2, 1, 0)]
    # ?mean?????float
    image = image.astype(np.float32)
    # ? mean
    image -= np.array([123, 117, 104])
    # ??? [batch-channel-height-width]
    image = np.transpose(image, (2, 0, 1))
    image = image[np.newaxis, :]
    # ?? ndarray
    image = nd.array(image)
    return image
Project: neural-doodle    Author: alexjc    | Project source | File source
def prepare_style(self, scale=1.0):
        """Called each phase of the optimization, process the style image according to the scale, then run it
        through the model to extract intermediate outputs (e.g. sem4_1) and turn them into patches.
        """
        style_img = self.rescale_image(self.style_img_original, scale)
        self.style_img = self.model.prepare_image(style_img)

        style_map = self.rescale_image(self.style_map_original, scale)
        self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32)

        # Compile a function to run on the GPU to extract patches for all layers at once.
        layer_outputs = zip(self.style_layers, self.model.get_outputs('sem', self.style_layers))
        extractor = self.compile([self.model.tensor_img, self.model.tensor_map], self.do_extract_patches(layer_outputs))
        result = extractor(self.style_img, self.style_map)

        # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. 
        self.style_data = {}
        for layer, *data in zip(self.style_layers, result[0::3], result[1::3], result[2::3]):
            patches = data[0]
            l = self.model.network['nn'+layer]
            l.num_filters = patches.shape[0] // args.slices
            self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\
                                   + [np.zeros((patches.shape[0],), dtype=np.float16)]
            print('  - Style layer {}: {} patches in {:,}kb.'.format(layer, patches.shape, patches.size//1000))
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def get_covariance(self):
        """Compute data covariance with the generative model.
        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where  S**2 contains the explained variances.
        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov
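
In the whitening branch above, exp_var[:, np.newaxis] turns the per-component variance vector into a column so its square roots broadcast across each component row. The trick in isolation (plain numpy, independent of the class):

import numpy as np

components = np.ones((2, 4))                            # 2 components x 4 features
exp_var = np.array([4.0, 1.0])
scaled = components * np.sqrt(exp_var[:, np.newaxis])   # per-row scaling
assert np.allclose(scaled[0], 2.0) and np.allclose(scaled[1], 1.0)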
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def inverse_transform(self, X):
        """Transform data back to its original space, i.e.,
        return an input X_original whose transform would be X
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.
        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)
        """
        check_is_fitted(self, 'mean_')

        if self.whiten:
            return fast_dot(
                X,
                np.sqrt(self.explained_variance_[:, np.newaxis]) *
                self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def update_data_sort_order(self, new_sort_order=None):
        if new_sort_order is not None:
            self.current_order = new_sort_order
        self.update_sort_idcs()
        self.data_image.set_extent((self.raw_lags[0], self.raw_lags[-1],
                            0, len(self.sort_idcs)))
        self.data_ax.set_ylim(0, len(self.sort_idcs))
        # normalize a copy so repeated calls do not mutate self.raw_data
        all_raw_data = self.raw_data / (1 + self.raw_data.mean(1)[:, np.newaxis])
        if len(all_raw_data) > 0:
            cmax          = 0.5*all_raw_data.max()
            cmin          = 0.5*all_raw_data.min()
            all_raw_data  = all_raw_data[self.sort_idcs, :]
        else:
            cmin = 0
            cmax = 1
        self.data_image.set_data(all_raw_data)
        self.data_image.set_clim(cmin, cmax)
        self.data_selection.set_y(len(self.sort_idcs)-len(self.selected_points))
        self.data_selection.set_height(len(self.selected_points))
        self.update_data_plot()
Project: spyking-circus    Author: spyking-circus    | Project source | File source
def plot_electrodes(self):
        if not getattr(self, 'collections', None):
            # It is important to set one facecolor per point so that we can change
            # it later
            self.electrode_collection = self.electrode_ax.scatter(self.x_position,
                                                                  self.y_position,
                                                                  facecolor=['black' for _ in self.x_position],
                                                                  s=30)
            self.electrode_ax.set_xlabel('Space [um]')
            self.electrode_ax.set_xticklabels([])
            self.electrode_ax.set_ylabel('Space [um]')
            self.electrode_ax.set_yticklabels([])
        else:
            self.electrode_collection.set_offsets(np.hstack([self.x_position[np.newaxis, :].T,
                                                             self.y_position[np.newaxis, :].T]))
        ax, x, y = self.electrode_ax, self.y_position, self.x_position
        ymin, ymax = min(x), max(x)
        yrange = (ymax - ymin)*0.5 * 1.05  # stretch everything a bit
        ax.set_ylim((ymax + ymin)*0.5 - yrange, (ymax + ymin)*0.5 + yrange)
        xmin, xmax = min(y), max(y)
        xrange = (xmax - xmin)*0.5 * 1.05  # stretch everything a bit
        ax.set_xlim((xmax + xmin)*0.5 - xrange, (xmax + xmin)*0.5 + xrange)

        self.ui.raw_data.draw_idle()
Project: photo-manager-classifier    Author: damianmoore    | Project source | File source
def __init__(self):
        if not self.code_table:
            with open(CATEGORY_CODES) as codes:
                self.code_table = {int(k): v for k, v in json.loads(codes.read()).items()}

        caffe_models = os.path.expanduser(CAFFE_MODELS)
        model = 'squeezenet', 'init_net.pb', 'predict_net.pb', 'ilsvrc_2012_mean.npy', 227
        self.model = model

        mean_file = os.path.join(caffe_models, model[0], model[3])
        if not os.path.exists(mean_file):
            self.mean = 128
        else:
            mean = np.load(mean_file).mean(1).mean(1)
            self.mean = mean[:, np.newaxis, np.newaxis]

        init_net = os.path.join(caffe_models, model[0], model[1])
        predict_net = os.path.join(caffe_models, model[0], model[2])

        with open(init_net, 'rb') as f:   # serialized protobufs are binary
            self.init_net = f.read()
        with open(predict_net, 'rb') as f:
            self.predict_net = f.read()
Project: snake_game    Author: wing3s    | Project source | File source
def play(self, nb_rounds):
        img_saver = save_image()
        img_saver.next()

        game_cnt = it.count(1)
        for i in xrange(nb_rounds):
            game = self.game(width=self.width, height=self.height)
            screen, _ = game.next()
            img_saver.send(screen)
            frame_cnt = it.count()
            try:
                state = np.asarray([screen] * self.nb_frames)
                while True:
                    frame_cnt.next()
                    act_idx = np.argmax(
                        self.model.predict(state[np.newaxis]), axis=-1)[0]
                    screen, _ = game.send(self.actions[act_idx])
                    state = np.roll(state, 1, axis=0)
                    state[0] = screen
                    img_saver.send(screen)
            except StopIteration:
                print 'Saved %4i frames for game %3i' % (
                    frame_cnt.next(), game_cnt.next())
        img_saver.close()
Project: wmd-relax    Author: src-d    | Project source | File source
def compute_similarity(self, doc1, doc2):
            """
            Calculates the similarity between two spaCy documents. Extracts the
            nBOW from them and evaluates the WMD.

            :return: The calculated similarity.
            :rtype: float.
            """
            doc1 = self._convert_document(doc1)
            doc2 = self._convert_document(doc2)
            vocabulary = {
                w: i for i, w in enumerate(sorted(set(doc1).union(doc2)))}
            w1 = self._generate_weights(doc1, vocabulary)
            w2 = self._generate_weights(doc2, vocabulary)
            evec = numpy.zeros((len(vocabulary), self.nlp.vocab.vectors_length),
                               dtype=numpy.float32)
            for w, i in vocabulary.items():
                evec[i] = self.nlp.vocab[w].vector
            evec_sqr = (evec * evec).sum(axis=1)
            dists = evec_sqr - 2 * evec.dot(evec.T) + evec_sqr[:, numpy.newaxis]
            dists[dists < 0] = 0
            dists = numpy.sqrt(dists)
            return libwmdrelax.emd(w1, w2, dists)
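
The dists computation uses the standard broadcasting identity ||v_i - v_j||^2 = ||v_i||^2 - 2 v_i.v_j + ||v_j||^2, with numpy.newaxis giving one of the squared-norm vectors a column orientation. A self-contained check of that identity, independent of spaCy and libwmdrelax:

import numpy as np

V = np.random.rand(5, 3)
sq = (V * V).sum(axis=1)                        # ||v_i||^2, shape (5,)
d2 = sq - 2 * V.dot(V.T) + sq[:, np.newaxis]    # all pairwise squared distances
d2[d2 < 0] = 0                                  # clamp rounding noise
ref = ((V[:, np.newaxis, :] - V[np.newaxis, :, :]) ** 2).sum(axis=-1)
assert np.allclose(d2, ref)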
Project: pybot    Author: spillai    | Project source | File source
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    # Create and clear the figure before drawing, so clf() does not wipe the image
    f = plt.figure(1)
    f.clf()
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
Project: pybot    Author: spillai    | Project source | File source
def ray(self, pts, undistort=True, rotate=False, normalize=False): 
        """
        Returns the ray corresponding to the points. 
        Optionally undistort (defaults to true), and 
        rotate ray to the camera's viewpoint 
        """
        upts = self.undistort_points(pts) if undistort else pts
        ret = unproject_points(
            np.hstack([ (colvec(upts[:,0])-self.cx) / self.fx, (colvec(upts[:,1])-self.cy) / self.fy ])
        )

        if rotate: 
            ret = self.extrinsics.rotate_vec(ret)

        if normalize: 
            ret = ret / np.linalg.norm(ret, axis=1)[:, np.newaxis]

        return ret
Project: pybot    Author: spillai    | Project source | File source
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=True)
Project: convolutional-pose-machines-tensorflow    Author: timctho    | Project source | File source
def make_gaussian(size, fwhm=3, center=None):
    """ Make a square gaussian kernel.
    size is the length of a side of the square
    fwhm is the full width at half maximum, which
    can be thought of as an effective radius.
    """

    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]

    if center is None:
        x0 = y0 = size // 2
    else:
        x0 = center[0]
        y0 = center[1]

    return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / 2.0 / fwhm / fwhm)
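
A hedged usage sketch: because y = x[:, np.newaxis] is a column, (x - x0)**2 + (y - y0)**2 broadcasts into a full 2-D grid without any meshgrid call, and the kernel peaks at its centre:

kernel = make_gaussian(size=7, fwhm=3)
assert kernel.shape == (7, 7)
assert kernel[3, 3] == kernel.max() == 1.0   # centre of a 7x7 kernel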
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def roiChanged(self):
        if self.image is None:
            return

        image = self.getProcessedImage()
        if image.ndim == 2:
            axes = (0, 1)
        elif image.ndim == 3:
            axes = (1, 2)
        else:
            return

        data, coords = self.roi.getArrayRegion(image.view(np.ndarray), self.imageItem, axes, returnMappedCoords=True)
        if data is not None:
            while data.ndim > 1:
                data = data.mean(axis=1)
            if image.ndim == 3:
                self.roiCurve.setData(y=data, x=self.tVals)
            else:
                while coords.ndim > 2:
                    coords = coords[:,:,0]
                coords = coords - coords[:,0,np.newaxis]
                xvals = (coords**2).sum(axis=0) ** 0.5
                self.roiCurve.setData(y=data, x=xvals)
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def generatePath(self, x, y):
        if self.opts['stepMode']:
            ## each value in the x/y arrays generates 2 points.
            x2 = np.empty((len(x),2), dtype=x.dtype)
            x2[:] = x[:,np.newaxis]
            if self.opts['fillLevel'] is None:
                x = x2.reshape(x2.size)[1:-1]
                y2 = np.empty((len(y),2), dtype=y.dtype)
                y2[:] = y[:,np.newaxis]
                y = y2.reshape(y2.size)
            else:
                ## If we have a fill level, add two extra points at either end
                x = x2.reshape(x2.size)
                y2 = np.empty((len(y)+2,2), dtype=y.dtype)
                y2[1:-1] = y[:,np.newaxis]
                y = y2.reshape(y2.size)[1:-1]
                y[0] = self.opts['fillLevel']
                y[-1] = self.opts['fillLevel']

        path = fn.arrayToQPath(x, y, connect=self.opts['connect'])

        return path
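
The stepMode branch duplicates every sample by broadcasting a 1-D array into an (N, 2) buffer and flattening it; the x-side of the trick in isolation:

import numpy as np

x = np.array([0., 1., 2.])
x2 = np.empty((len(x), 2), dtype=x.dtype)
x2[:] = x[:, np.newaxis]             # each value copied into its own row
steps = x2.reshape(x2.size)[1:-1]    # drop the two outer duplicates
assert np.array_equal(steps, np.array([0., 1., 1., 2.]))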
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def faceNormals(self, indexed=None):
        """
        Return an array (Nf, 3) of normal vectors for each face.
        If indexed='faces', then instead return an indexed array
        (Nf, 3, 3)  (this is just the same array with each vector
        copied three times).
        """
        if self._faceNormals is None:
            v = self.vertexes(indexed='faces')
            self._faceNormals = np.cross(v[:,1]-v[:,0], v[:,2]-v[:,0])

        if indexed is None:
            return self._faceNormals
        elif indexed == 'faces':
            if self._faceNormalsIndexedByFaces is None:
                norms = np.empty((self._faceNormals.shape[0], 3, 3))
                norms[:] = self._faceNormals[:,np.newaxis,:]
                self._faceNormalsIndexedByFaces = norms
            return self._faceNormalsIndexedByFaces
        else:
            raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
Project: NeoAnalysis    Author: neoanalysis    | Project source | File source
def __init__(self, filename):
        """
        filename: string, path to ASCII file to read.
        """

        self.filename = filename

        # read the first line to check the data type (int or float) of the data
        with open(self.filename) as f:
            line = f.readline()

        additional_parameters = {}
        if '.' not in line:
            additional_parameters['dtype'] = np.int32

        self.data = np.loadtxt(self.filename, **additional_parameters)

        if len(self.data.shape) == 1:
            self.data = self.data[:, np.newaxis]
Project: kaggle-review    Author: daxiongshu    | Project source | File source
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    https://www.kaggle.com/wiki/MultiClassLogLoss
    Parameters
    ----------
    y_true : array, shape = [n_samples]
            true class, integers in [0, n_classes)
    y_pred : array, shape = [n_samples, n_classes]
    Returns
    -------
    loss : float
    """
    predictions = np.clip(y_pred, eps, 1 - eps)

    # normalize row sums to 1
    predictions /= predictions.sum(axis=1)[:, np.newaxis]

    actual = np.zeros(y_pred.shape)
    n_samples = actual.shape[0]
    actual[np.arange(n_samples), y_true.astype(int)] = 1
    vectsum = np.sum(actual * np.log(predictions))
    loss = -1.0 / n_samples * vectsum
    return loss
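
A small sanity check of the metric (assuming, per the docstring, that each y_pred row is a probability distribution): when every true class receives probability 0.8, the loss is exactly -log(0.8):

import numpy as np

y_true = np.array([0, 1, 2])
y_pred = np.array([[0.8, 0.1, 0.1],
                   [0.1, 0.8, 0.1],
                   [0.1, 0.1, 0.8]])
assert np.isclose(multiclass_log_loss(y_true, y_pred), -np.log(0.8))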
Project: autolab_core    Author: BerkeleyAutomation    | Project source | File source
def rotation_from_axes(x_axis, y_axis, z_axis):
        """Convert specification of axis in target frame to
        a rotation matrix from source to target frame.

        Parameters
        ----------
        x_axis : :obj:`numpy.ndarray` of float
            A normalized 3-vector for the target frame's x-axis.

        y_axis : :obj:`numpy.ndarray` of float
            A normalized 3-vector for the target frame's y-axis.

        z_axis : :obj:`numpy.ndarray` of float
            A normalized 3-vector for the target frame's z-axis.

        Returns
        -------
        :obj:`numpy.ndarray` of float
            A 3x3 rotation matrix that transforms from a source frame to the
            given target frame.
        """
        return np.hstack((x_axis[:,np.newaxis], y_axis[:,np.newaxis], z_axis[:,np.newaxis]))
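
Each axis[:, np.newaxis] reshapes a 3-vector into a 3x1 column, so hstack assembles the rotation matrix column by column. As a quick hedged check (calling it as the static method it appears to be), the standard basis maps to the identity:

import numpy as np

x = np.array([1., 0., 0.])
y = np.array([0., 1., 0.])
z = np.array([0., 0., 1.])
assert np.allclose(rotation_from_axes(x, y, z), np.eye(3))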
Project: autolab_core    Author: BerkeleyAutomation    | Project source | File source
def _preprocess_data(self, data):
        """Converts the data array to the preferred dim x #points structure.

        Parameters
        ----------
        data : :obj:`numpy.ndarray` of float
            The data to process.

        Returns
        -------
        :obj:`numpy.ndarray` of float
            The same data array, but reshapes lists to be dim x 1.
        """
        if len(data.shape) == 1: 
            data = data[:,np.newaxis]
        return data
Project: python-utils    Author: zhijian-liu    | Project source | File source
def add(self, outputs, targets):
        outputs = to_numpy(outputs)
        targets = to_numpy(targets)

        if np.ndim(targets) == 2:
            targets = np.argmax(targets, 1)

        assert np.ndim(outputs) == 2, 'wrong output size (2D expected)'
        assert np.ndim(targets) == 1, 'wrong target size (1D or 2D expected)'
        assert targets.shape[0] == outputs.shape[0], 'number of outputs and targets do not match'

        top_k = self.top_k
        max_k = int(top_k[-1])

        predict = torch.from_numpy(outputs).topk(max_k, 1, True, True)[1].numpy()
        correct = (predict == targets[:, np.newaxis].repeat(predict.shape[1], 1))

        self.size += targets.shape[0]
        for k in top_k:
            self.corrects[k] += correct[:, :k].sum()
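
The correctness test compares every top-k prediction column against the target vector; targets[:, np.newaxis] makes that a broadcast comparison (the trailing .repeat is redundant with broadcasting, but harmless). A minimal numpy-only sketch of the same pattern:

import numpy as np

predict = np.array([[2, 0, 1],
                    [1, 2, 0]])              # top-3 class indices per sample
targets = np.array([2, 0])
correct = predict == targets[:, np.newaxis]  # shape (2, 3)
assert correct[:, :1].sum() == 1             # top-1 hits
assert correct.sum() == 2                    # top-3 hits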
Project: mx-rfcn    Author: giorking    | Project source | File source
def transform(im, pixel_means, need_mean=False):
    """
    transform into mxnet tensor
    subtract pixel means and transform to the correct format
    :param im: [height, width, channel] in BGR
    :param pixel_means: [[[R, G, B pixel means]]]
    :return: [batch, channel, height, width]
    """
    im = im.copy()
    im[:, :, (0, 1, 2)] = im[:, :, (2, 1, 0)]
    im = im.astype(float)
    if need_mean:
        im -= pixel_means
    im_tensor = im[np.newaxis, :]
    # put channel first
    channel_swap = (0, 3, 1, 2)
    im_tensor = im_tensor.transpose(channel_swap)
    return im_tensor
Project: faster-rcnn-resnet    Author: Eniac-Xie    | Project source | File source
def setup(self, bottom, top):
        # (1, 3, 1, 1) shaped arrays
        self.PIXEL_MEANS = \
            np.array([[[[0.48462227599918]],
                       [[0.45624044862054]],
                       [[0.40588363755159]]]])
        self.PIXEL_STDS = \
            np.array([[[[0.22889466674951]],
                       [[0.22446679341259]],
                       [[0.22495548344775]]]])
        # The default ("old") pixel means that were already subtracted
        channel_swap = (0, 3, 1, 2)
        self.OLD_PIXEL_MEANS = \
            cfg.PIXEL_MEANS[np.newaxis, :, :, :].transpose(channel_swap)

        top[0].reshape(*(bottom[0].shape))
Project: atoolbox    Author: liweitianux    | Project source | File source
def calc_ps3d(self):
        """
        Calculate the 3D power spectrum of the image cube.

        The power spectrum is properly normalized to have dimension
        of [K^2 Mpc^3].
        """
        if self.window is not None:
            logger.info("Applying window along frequency axis ...")
            self.cube *= self.window[:, np.newaxis, np.newaxis]

        logger.info("3D FFTing data cube ...")
        cubefft = fftpack.fftshift(fftpack.fftn(self.cube))

        logger.info("Calculating 3D power spectrum ...")
        ps3d = np.abs(cubefft) ** 2  # [K^2]
        # Normalization
        norm1 = 1 / (self.Nx * self.Ny * self.Nz)
        norm2 = 1 / (self.fs_xy**2 * self.fs_z)  # [Mpc^3]
        norm3 = 1 / (2*np.pi)**3
        self.ps3d = ps3d * norm1 * norm2 * norm3  # [K^2 Mpc^3]
        return self.ps3d
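
The window multiplication relies on trailing-axis broadcasting: indexed as window[:, np.newaxis, np.newaxis], a length-Nz window has shape (Nz, 1, 1) and scales every (x, y) pixel of the matching frequency slice. A hedged sketch with made-up dimensions:

import numpy as np

Nz, Ny, Nx = 4, 8, 8
cube = np.ones((Nz, Ny, Nx))
window = np.hanning(Nz)
cube *= window[:, np.newaxis, np.newaxis]   # (4, 1, 1) broadcasts to (4, 8, 8)
assert np.allclose(cube[1], window[1])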
Project: prysm    Author: brandondube    | Project source | File source
def gaussian(sigma=0.5, samples=128):
    ''' Generates a gaussian mask with a given sigma

    Args:
        sigma (`float`): width parameter of the gaussian, expressed in radii of
            the output array.

        samples (`int`): number of samples in square array.

    Returns:
        `numpy.ndarray`: mask with gaussian shape.

    '''
    s = sigma

    x = np.arange(0, samples, 1, float)
    y = x[:, np.newaxis]

    # // is floor division in python
    x0 = y0 = samples // 2
    return exp(-4 * log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / (s * samples) ** 2)
Project: untwist    Author: IoSR-Surrey    | Project source | File source
def process(self, wave):
        wave.check_mono()
        if wave.sample_rate != self.sr:
            raise Exception("Wrong sample rate")                              
        n = int(np.ceil(2 * wave.num_frames / float(self.w_len)))
        m = (n + 1) * self.w_len / 2 
        swindow = self.make_signal_window(n)
        win_ratios = [self.window / swindow[t * self.w_len / 2 : 
            t * self.w_len / 2 + self.w_len] 
            for t in range(n)]
        wave = wave.zero_pad(0, int(m - wave.num_frames))
        wave = audio.Wave(signal.hilbert(wave), wave.sample_rate)        
        result = np.zeros((self.n_bins, n))

        for b in range(self.n_bins): 
            w = self.widths[b]
            wc = 1 / np.square(w + 1)
            filter = self.filters[b]
            band = fftfilt(filter, wave.zero_pad(0, int(2 * w))[:,0])
            band = band[int(w) : int(w + m), np.newaxis]    
            for t in range(n):
                frame = band[t * self.w_len / 2:
                             t * self.w_len / 2 + self.w_len,:] * win_ratios[t]
                result[b, t] =  wc * np.real(np.conj(np.dot(frame.conj().T, frame)))
        return audio.Spectrogram(result, self.sr, self.w_len, self.w_len / 2)
Project: untwist    Author: IoSR-Surrey    | Project source | File source
def add(self, x, y = None):
        self.X =  np.memmap(
            self.path+"/X.npy", self.X.dtype,
            shape = (self.nrows + x.shape[0] , x.shape[1])
        )
        self.X[self.nrows:self.nrows + x.shape[0],:] = x

        if y is not None:
            if x.shape != y.shape:
                raise ValueError("x and y should have the same shape")
            self.Y = np.memmap(
                self.path+"/Y.npy", self.Y.dtype,
                shape = (self.nrows + y.shape[0] , y.shape[1])
            )
            self.Y[self.nrows:self.nrows + y.shape[0],:] = y

        delta = x - self.running_mean
        n = self.X.shape[0] + np.arange(x.shape[0]) + 1
        self.running_dev += np.sum(delta * (x - self.running_mean), 0)
        self.running_mean += np.sum(delta / n[:, np.newaxis], 0)
        self.running_max  = np.amax(np.vstack((self.running_max, x)), 0)
        self.running_min  = np.amin(np.vstack((self.running_min, x)), 0)
        self.nrows += x.shape[0]
Project: chainer-spatial-transformer-networks    Author: hvy    | Project source | File source
def transform_mnist_rts(in_data):
    img, label = in_data
    img = img[0]  # Remove channel axis for skimage manipulation

    # Rotate
    img = transform.rotate(img, angle=np.random.uniform(-45, 45),
                           resize=True, mode='constant')
    #  Scale
    img = transform.rescale(img, scale=np.random.uniform(0.7, 1.2),
                            mode='constant')

    # Translate
    h, w = img.shape
    if h >= img_size[0] or w >= img_size[1]:
        img = transform.resize(img, output_shape=img_size, mode='constant')
        img = img.astype(np.float32)
    else:
        img_canvas = np.zeros(img_size, dtype=np.float32)
        ymin = np.random.randint(0, img_size[0] - h)
        xmin = np.random.randint(0, img_size[1] - w)
        img_canvas[ymin:ymin+h, xmin:xmin+w] = img
        img = img_canvas

    img = img[np.newaxis, :]  # Add the batch axis back
    return img, label
Project: train-DeepLab    Author: martinkersner    | Project source | File source
def predict(self, inputs):
    # uses MEMORY_DATA layer for loading images and postprocessing DENSE_CRF layer
    img = inputs[0].transpose((2, 0, 1))
    img = img[np.newaxis, :].astype(np.float32)
    label = np.zeros((1, 1, 1, 1), np.float32)
    data_dim = np.zeros((1, 1, 1, 2), np.float32)
    data_dim[0][0][0][0] = img.shape[2]
    data_dim[0][0][0][1] = img.shape[3]

    img      = np.ascontiguousarray(img, dtype=np.float32)
    label    = np.ascontiguousarray(label, dtype=np.float32)
    data_dim = np.ascontiguousarray(data_dim, dtype=np.float32)

    self.set_input_arrays(img, label, data_dim)
    out = self.forward()

    predictions = out[self.outputs[0]] # the output layer should be called crf_inf
    segm_result = predictions[0].argmax(axis=0).astype(np.uint8)

    return segm_result
Project: OASIS    Author: j-friedrich    | Project source | File source
def estimate_time_constant(y, p=2, sn=None, lags=5, fudge_factor=1.):
    """
    Estimate AR model parameters through the autocovariance function

    Parameters
    ----------
    y : array, shape (T,)
        One dimensional array containing the fluorescence intensities with
        one entry per time-bin.
    p : positive integer
        order of AR system
    sn : float
        noise standard deviation, estimated if not provided.
    lags : positive integer
        number of additional lags at which the autocovariance is computed
    fudge_factor : float (0< fudge_factor <= 1)
        shrinkage factor to reduce bias

    Returns
    -------
    g : estimated coefficients of the AR process
    """

    if sn is None:
        sn = GetSn(y)

    lags += p
    xc = axcov(y, lags)
    xc = xc[:, np.newaxis]

    A = scipy.linalg.toeplitz(xc[lags + np.arange(lags)],
                              xc[lags + np.arange(p)]) - sn**2 * np.eye(lags, p)
    g = np.linalg.lstsq(A, xc[lags + 1:])[0]
    gr = np.roots(np.concatenate([np.array([1]), -g.flatten()]))
    gr = (gr + gr.conjugate()) / 2.
    gr[gr > 1] = 0.95 + np.random.normal(0, 0.01, np.sum(gr > 1))
    gr[gr < 0] = 0.15 + np.random.normal(0, 0.01, np.sum(gr < 0))
    g = np.poly(fudge_factor * gr)
    g = -g[1:]

    return g.flatten()
Project: probabilistic-matrix-factorization    Author: aki-nishimura    | Project source | File source
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i, phi_r_i, phi_u):
        # Params:
        #   J - column indices

        nnz_i = len(J)
        residual_i = y_i - mu0 - c[J]
        prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
        v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
        post_Phi_i = prior_Phi + \
                     np.dot(v_T.T,
                            np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T)  # Weighted sum of v_j * v_j.T
        post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_i)
        post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_i)),
                                                                                   lower=lower)
        ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev_i], u_prev_i)))
        r_i = ru_i[0]
        u_i = ru_i[1:]

        return r_i, u_i
Project: probabilistic-matrix-factorization    Author: aki-nishimura    | Project source | File source
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j, phi_c_j, phi_v):

        prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
        nnz_j = len(I)
        residual_j = y_j - mu0 - r[I]
        u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
        post_Phi_j = prior_Phi + \
                     np.dot(u_T.T,
                            np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T)  # Weighted sum of u_i * u_i.T
        post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_j)
        post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        cv_j = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_j)),
                                                                              lower=lower)
        cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev_j], v_prev_j)))
        c_j = cv_j[0]
        v_j = cv_j[1:]

        return c_j, v_j
Project: pylspm    Author: lseman    | Project source | File source
def weighted_linear_regression(self, x, y, weights):
        return np.linalg.inv((weights[:, np.newaxis] * x).T.dot(x)).dot((weights[:, np.newaxis] * x).T.dot(y))
Project: pylspm    Author: lseman    | Project source | File source
def expectation(self, dataSplit, coefficients, variances):

        assignment_weights = np.ones(
            (len(dataSplit), self.num_components), dtype=float)

        self.Q = len(self.endoVar)

        for k in range(self.num_components):

            coef_ = coefficients[k]

            Beta = coef_.ix[self.endoVar][self.endoVar]
            Gamma = coef_.ix[self.endoVar][self.exoVar]

            a_ = (np.dot(Beta, self.fscores[
                  self.endoVar].T) + np.dot(Gamma, self.fscores[self.exoVar].T))

            invert_ = np.linalg.inv(np.array(variances[k]))

            exponential = np.exp(-0.5 * np.dot(np.dot(a_.T, invert_), a_))

            den = (((2 * np.pi)**(self.Q / 2)) *
                   np.sqrt(np.linalg.det(variances[k])))
            probabilities = exponential / den
            probabilities = probabilities[0]
            assignment_weights[:, k] = probabilities

        assignment_weights /= assignment_weights.sum(axis=1)[:, np.newaxis]
 #       print(assignment_weights)
        return assignment_weights
Project: treecat    Author: posterior    | Project source | File source
def contract_positions(XY, edges, stepsize):
    """Perturb vertex positions by an L1-minimizing attractive force.

    This is used to slightly adjust vertex positions to provide a visual
    hint to their grouping.

    Args:
        XY: A [V, 2]-shaped numpy array of the current positions.
        edges: An [E, 2]-shaped numpy array of edges as (vertex,vertex) pairs.
        stepsize: A float scaling the attractive force.

    Returns:
        A new [V, 2]-shaped numpy array of adjusted positions.
    """
    E = edges.shape[0]
    V = E + 1
    assert edges.shape == (E, 2)
    assert XY.shape == (V, 2)
    old = XY
    new = old.copy()
    heads = edges[:, 0]
    tails = edges[:, 1]
    diff = old[heads] - old[tails]
    distances = (diff**2).sum(axis=1)**0.5
    spacing = distances.min()
    assert spacing > 0
    diff /= distances[:, np.newaxis]
    diff *= spacing
    new[tails] += stepsize * diff
    new[heads] -= stepsize * diff
    return new
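
A tiny hedged example on a three-vertex path graph: both endpoints should move toward the middle vertex, which stays put by symmetry:

import numpy as np

XY = np.array([[0., 0.], [1., 0.], [2., 0.]])
edges = np.array([[0, 1], [1, 2]])
moved = contract_positions(XY, edges, stepsize=0.1)
assert moved[0, 0] > 0.0 and moved[2, 0] < 2.0   # endpoints pulled inward
assert np.allclose(moved[1], [1., 0.])           # middle vertex is balanced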
Project: treecat    Author: posterior    | Project source | File source
def validate_gof(N, V, C, M, server, conditional):
    # Generate samples.
    expected = C**V
    num_samples = 1000 * expected
    ones = np.ones(V, dtype=np.int8)
    if conditional:
        cond_data = server.sample(1, ones)[0, :]
    else:
        cond_data = server.make_zero_row()
    samples = server.sample(num_samples, ones, cond_data)
    logprobs = server.logprob(samples + cond_data[np.newaxis, :])
    counts = {}
    probs = {}
    for sample, logprob in zip(samples, logprobs):
        key = tuple(sample)
        if key in counts:
            counts[key] += 1
        else:
            counts[key] = 1
            probs[key] = np.exp(logprob)
    assert len(counts) == expected

    # Check accuracy using Pearson's chi-squared test.
    keys = sorted(counts.keys(), key=lambda key: -probs[key])
    counts = np.array([counts[k] for k in keys], dtype=np.int32)
    probs = np.array([probs[k] for k in keys])
    probs /= probs.sum()

    # Truncate to avoid low-precision.
    truncated = False
    valid = (probs * num_samples > 20)
    if not valid.all():
        T = valid.argmin()
        T = max(8, T)  # Avoid truncating too much
        probs = probs[:T]
        counts = counts[:T]
        truncated = True

    gof = multinomial_goodness_of_fit(
        probs, counts, num_samples, plot=True, truncated=truncated)
    assert 1e-2 < gof
Project: treecat    Author: posterior    | Project source | File source
def latent_correlation(self):
        """Compute correlation matrix among latent features.

        This computes the generalization of Pearson's correlation to discrete
        data. Let I(X;Y) be the mutual information. Then define correlation as

          rho(X,Y) = sqrt(1 - exp(-2 I(X;Y)))

        Returns:
            A [V, V]-shaped numpy array of feature-feature correlations.
        """
        logger.debug('computing latent correlation')
        V, E, M, R = self._VEMR
        edge_probs = self._edge_probs
        vert_probs = self._vert_probs
        result = np.zeros([V, V], np.float32)
        for root in range(V):
            messages = np.empty([V, M, M])
            program = make_propagation_program(self._tree.tree_grid, root)
            for op, v, v2, e in program:
                if op == OP_ROOT:
                    # Initialize correlation at this node.
                    messages[v, :, :] = np.diagflat(vert_probs[v, :])
                elif op == OP_OUT:
                    # Propagate correlation outward from parent to v.
                    trans = edge_probs[e, :, :]
                    if v > v2:
                        trans = trans.T
                    messages[v, :, :] = np.dot(  #
                        trans / vert_probs[v2, np.newaxis, :],
                        messages[v2, :, :])
            for v in range(V):
                result[root, v] = correlation(messages[v, :, :])
        return result
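
The correlation() helper is not shown in this excerpt; a hedged sketch of the docstring formula, assuming its input is a normalized [M, M] joint probability table (the role the propagated messages play above):

import numpy as np

def correlation_from_joint(joint):
    # I(X;Y) of a normalized joint table, then rho = sqrt(1 - exp(-2 I)).
    px = joint.sum(axis=1)[:, np.newaxis]
    py = joint.sum(axis=0)[np.newaxis, :]
    nz = joint > 0
    mi = (joint[nz] * np.log(joint[nz] / (px * py)[nz])).sum()
    return np.sqrt(max(0.0, 1.0 - np.exp(-2.0 * mi)))

indep = np.full((2, 2), 0.25)   # independent variables: I = 0, so rho = 0
assert np.isclose(correlation_from_joint(indep), 0.0)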
Project: treecat    Author: posterior    | Project source | File source
def sample_from_probs2(probs, out=None):
    """Sample from multiple vectors of non-normalized probabilities.

    Args:
        probs: An [N, M]-shaped numpy array of non-normalized probabilities.
        out: An optional destination for the result.

    Returns:
        An [N]-shaped numpy array of integers in range(M).
    """
    # Adapted from https://stackoverflow.com/questions/40474436
    assert len(probs.shape) == 2
    cdf = probs.cumsum(axis=1)
    u = np.random.rand(probs.shape[0], 1) * cdf[:, -1, np.newaxis]
    return (u < cdf).argmax(axis=1, out=out)
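
A hedged usage sketch: the rows need not sum to one, because each row's total (its last CDF entry, kept as a column via np.newaxis) rescales the uniform draws:

import numpy as np

probs = np.array([[1.0, 3.0],    # row 0: P(index 1) = 0.75
                  [2.0, 2.0]])   # row 1: uniform
draws = np.stack([sample_from_probs2(probs) for _ in range(1000)])
assert draws.shape == (1000, 2)
assert 0.6 < draws[:, 0].mean() < 0.9   # index 1 drawn roughly 75% of the time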
Project: MKLMM    Author: omerwe    | Project source | File source
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
        L = post['L']
        if (np.size(L) == 0): raise Exception('L is an empty array') #possible to compute it here
        Lchol = np.all((np.all(np.tril(L, -1)==0, axis=0) & (np.diag(L)>0)) & np.isreal(np.diag(L)))
        ns = diagKTestTest.shape[0]
        nperbatch = 5000
        nact = 0

        #allocate mem
        fmu = np.zeros(ns)  #column vector (of length ns) of predictive latent means
        fs2 = np.zeros(ns)  #column vector (of length ns) of predictive latent variances
        while (nact<(ns-1)):
            id = np.arange(nact, np.minimum(nact+nperbatch, ns))
            kss = diagKTestTest[id]     
            Ks = KtrainTest[:, id]
            if (len(post['alpha'].shape) == 1):
                try: Fmu = intercept[id] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu
            else:
                try: Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
                except: Fmu = intercept + Ks.T.dot(post['alpha'])
                fmu[id] = Fmu.mean(axis=1)
            if Lchol:
                V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T, trans=1, check_finite=False, overwrite_b=True)
                fs2[id] = kss - np.sum(V**2, axis=0)                       #predictive variances                        
            else:
                fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)           #predictive variances
            fs2[id] = np.maximum(fs2[id],0)  #remove numerical noise i.e. negative variances        
            nact = id[-1]    #set counter to index of last processed data point

        return fmu, fs2
Project: MKLMM    Author: omerwe    | Project source | File source
def sq_dist(a, b=None):
    #mean-center for numerical stability
    D, n = a.shape[0], a.shape[1]
    if (b is None):
        mu = a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b = a
        m = n
        aSq = np.sum(a**2, axis=0)
        bSq = aSq
    else:
        d, m = b.shape[0], b.shape[1]
        if (d != D): raise Exception('column lengths must agree')
        mu = (float(m)/float(m+n))*b.mean(axis=1) + (float(n)/float(m+n))*a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b -= mu[:, np.newaxis]      
        aSq = np.sum(a**2, axis=0)
        bSq = np.sum(b**2, axis=0)

    C = np.tile(np.column_stack(aSq).T, (1, m)) + np.tile(bSq, (n, 1)) - 2*a.T.dot(b)
    C = np.maximum(C, 0)    #remove numerical noise
    return C
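
A hedged check that sq_dist agrees with explicitly broadcast pairwise distances. Note the convention: inputs are dim x n (columns are points), and the function mean-centers a in place, so pass a copy if the caller still needs the original:

import numpy as np

a = np.random.rand(3, 5)   # 3-D data, 5 points
C = sq_dist(a.copy())
ref = ((a[:, :, np.newaxis] - a[:, np.newaxis, :]) ** 2).sum(axis=0)
assert np.allclose(C, ref)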

Project: kaggle_dsb2017    Author: astoc    | Project source | File source
def generate_markers_3d(image):
    #Creation of the internal Marker
    marker_internal = image < -400
    marker_internal_labels = np.zeros(image.shape).astype(np.int16)
    for i in range(marker_internal.shape[0]):
        marker_internal[i] = segmentation.clear_border(marker_internal[i])
        marker_internal_labels[i] = measure.label(marker_internal[i])
    #areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas = [r.area for i in range(marker_internal.shape[0]) for r in measure.regionprops(marker_internal_labels[i])]
    for i in range(marker_internal.shape[0]):
        areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
        areas.sort()
        if len(areas) > 2:
            for region in measure.regionprops(marker_internal_labels[i]):
                if region.area < areas[-2]:
                    for coordinates in region.coords:
                        marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    #Creation of the external Marker

    # 3x3 structuring element with connectivity 1, used by default
    struct1 = ndimage.generate_binary_structure(2, 1)
    struct1 = struct1[np.newaxis,:,:]  # expand by z axis .

    external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
    marker_external = external_b ^ external_a
    #Creation of the Watershed Marker matrix
    #marker_watershed = np.zeros((512, 512), dtype=np.int)  # origi
    marker_watershed = np.zeros(marker_external.shape, dtype=int)

    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128

    return marker_internal, marker_external, marker_watershed
Project: human-rl    Author: gsastry    | Project source | File source
def prediction_time(self, X):
        start_time = time.time()
        dp = {}
        for key, value in X.items():
            dp[key] = value[np.newaxis, 0]
        times = 100
        for _ in range(times):
            self.predict_proba(dp)
        return (time.time() - start_time) / times