Python numpy module: roll() example source code

The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.roll().
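A quick refresher before the project examples: numpy.roll(a, shift, axis=None) circularly shifts the elements of an array along the given axis, wrapping the elements that fall off one end back onto the other. A minimal, self-contained illustration:

import numpy as np

a = np.arange(6)
np.roll(a, 2)           # array([4, 5, 0, 1, 2, 3]) -- shift right by 2, wrapping around
np.roll(a, -1)          # array([1, 2, 3, 4, 5, 0]) -- shift left by 1

m = np.arange(6).reshape(2, 3)
np.roll(m, 1, axis=1)   # roll each row one position to the right
np.roll(m, 1)           # without axis, the array is flattened, rolled, then reshaped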

Project: pycos    Author: pgiri    | project source | file source
def rtask_avg_proc(threshold, trend_task, window_size, task=None):
    import numpy as np
    data = np.empty(window_size, dtype=float)
    data.fill(0.0)
    cumsum = 0.0
    while True:
        i, n = yield task.receive()
        if n is None:
            break
        cumsum += (n - data[0])
        avg = cumsum / window_size
        if avg > threshold:
            trend_task.send((i, 'high', float(avg)))
        elif avg < -threshold:
            trend_task.send((i, 'low', float(avg)))
        data = np.roll(data, -1)
        data[-1] = n
    raise StopIteration(0)


# This generator function is sent to remote dispycos process to save the
# received data in a file (on the remote peer).
Project: cellranger    Author: 10XGenomics    | project source | file source
def numpy_groupby(values, keys):
    """ Group a collection of numpy arrays by key arrays.
        Yields (key_tuple, view_tuple) where key_tuple is the key grouped on and view_tuple is a tuple of views into the value arrays.
          values: tuple of arrays to group
          keys: tuple of sorted, numeric arrays to group by """

    if len(values) == 0:
        return
    if len(values[0]) == 0:
        return

    for key_array in keys:
        assert len(key_array) == len(keys[0])
    for value_array in values:
        assert len(value_array) == len(keys[0])

    # The indices where any of the keys differ from the previous key become group boundaries
    key_change_indices = np.logical_or.reduce(tuple(np.concatenate(([1], np.diff(key))) != 0 for key in keys))
    group_starts = np.flatnonzero(key_change_indices)
    group_ends = np.roll(group_starts, -1)
    group_ends[-1] = len(keys[0])

    for group_start, group_end in zip(group_starts, group_ends):
        yield tuple(key[group_start] for key in keys), tuple(value[group_start:group_end] for value in values)
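A small sketch (with made-up keys) of the boundary trick used above: np.diff flags every position where the sorted key changes, and np.roll(group_starts, -1) pairs each group start with the following start to produce the group ends.

import numpy as np

keys = np.array([0, 0, 0, 1, 1, 3, 3, 3, 3])            # one sorted key array
change = np.concatenate(([1], np.diff(keys))) != 0       # True at every new key value
group_starts = np.flatnonzero(change)                    # [0, 3, 5]
group_ends = np.roll(group_starts, -1)                   # [3, 5, 0] -- last entry wrapped around
group_ends[-1] = len(keys)                               # [3, 5, 9]
# groups: keys[0:3] == 0, keys[3:5] == 1, keys[5:9] == 3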
Project: snake_game    Author: wing3s    | project source | file source
def play(self, nb_rounds):
        img_saver = save_image()
        next(img_saver)

        game_cnt = it.count(1)
        for i in range(nb_rounds):
            game = self.game(width=self.width, height=self.height)
            screen, _ = next(game)
            img_saver.send(screen)
            frame_cnt = it.count()
            try:
                state = np.asarray([screen] * self.nb_frames)
                while True:
                    next(frame_cnt)
                    act_idx = np.argmax(
                        self.model.predict(state[np.newaxis]), axis=-1)[0]
                    screen, _ = game.send(self.actions[act_idx])
                    state = np.roll(state, 1, axis=0)
                    state[0] = screen
                    img_saver.send(screen)
            except StopIteration:
                print('Saved %4i frames for game %3i' % (
                    next(frame_cnt), next(game_cnt)))
        img_saver.close()
Project: genomedisco    Author: kundajelab    | project source | file source
def shift_dataset(m,boundarynoise):
    if boundarynoise==0:
        return m
    nonzero_rows=np.where(m.any(axis=1))[0]
    small_m=copy.deepcopy(m)
    small_m=small_m[nonzero_rows,:]
    small_m=small_m[:,nonzero_rows]
    print(small_m)
    print('roll')
    small_m=np.roll(small_m,boundarynoise,axis=0)
    print(small_m)
    print('roll2')
    small_m=np.roll(small_m,boundarynoise,axis=1)
    print(small_m)
    outm=np.zeros(m.shape)
    for i_idx in range(len(nonzero_rows)):
        i=nonzero_rows[i_idx]
        for j_idx in range(i_idx,len(nonzero_rows)):
            j=nonzero_rows[j_idx]
            outm[i,j]=small_m[i_idx,j_idx]
            outm[j,i]=outm[i,j]
    return outm
Project: pybot    Author: spillai    | project source | file source
def points_and_normals(self): 
        """
        Returns the point/normals parametrization for planes, 
        including clipped zmin and zmax frustums

        Note: points need to be in CCW order
        """

        nv1, fv1 = self._front_back_vertices
        nv2 = np.roll(nv1, -1, axis=0)
        fv2 = np.roll(fv1, -1, axis=0)

        vx = np.vstack([fv1-nv1, nv2[0]-nv1[0], fv1[2]-fv1[1]])
        vy = np.vstack([fv2-fv1, nv2[1]-nv2[0], fv1[1]-fv1[0]])
        pts = np.vstack([fv1, nv1[0], fv1[1]])

        # vx += 1e-12
        # vy += 1e-12

        vx /= np.linalg.norm(vx, axis=1).reshape(-1,1)
        vy /= np.linalg.norm(vy, axis=1).reshape(-1,1)

        normals = np.cross(vx, vy)
        normals /= np.linalg.norm(normals, axis=1).reshape(-1,1)
        return pts, normals
Project: DenoiseAverage    Author: Pella86    | project source | file source
def correlate(self, imgfft):
        #Very much related to the convolution theorem, the cross-correlation
        #theorem states that the Fourier transform of the cross-correlation of
        #two functions is equal to the product of the individual Fourier
        #transforms, where one of them has been complex conjugated:  


        if self.imgfft is not 0 and imgfft.imgfft is not 0:
            imgcj = np.conjugate(self.imgfft)
            imgft = imgfft.imgfft

            # element-wise product of the two spectra (the cross-power spectrum)
            prod = imgcj * imgft

            cc = Corr( np.real(fft.ifft2(fft.fftshift(prod)))) # real image of the correlation

            # adjust to center
            cc.data = np.roll(cc.data, int(cc.data.shape[0] / 2), axis = 0)
            cc.data = np.roll(cc.data, int(cc.data.shape[1] / 2), axis = 1)
        else:
            raise FFTnotInit()
        return cc
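A numpy-only sketch of the same idea on synthetic data: per the cross-correlation theorem, ifft2(conj(F) * G) gives the circular cross-correlation, and rolling by half the array size re-centres the zero-lag peak (equivalent to np.fft.fftshift for even sizes).

import numpy as np

a = np.random.rand(64, 64)
b = np.roll(np.roll(a, 5, axis=0), -3, axis=1)        # b is a circularly shifted copy of a

cc = np.real(np.fft.ifft2(np.conj(np.fft.fft2(a)) * np.fft.fft2(b)))
cc = np.roll(np.roll(cc, cc.shape[0] // 2, axis=0), cc.shape[1] // 2, axis=1)

peak = np.unravel_index(np.argmax(cc), cc.shape)
# peak sits at (32 + 5, 32 - 3): the offset from the centre recovers the shift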
Project: inqbus.rainflow    Author: Inqbus    | project source | file source
def get_extrema(data):
    # find extrema by finding indexes where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)

    # the first and last values always count as local extrema
    signchange[0] = 1

    # last value is missing because the diff-array is 1 value shorter than the
    # input array so we have to add it again
    signchange = np.append(signchange, np.array([1]))

    calc_data = data[np.where(signchange != 0)]

    return calc_data
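A short illustration of the sign-change trick above on made-up data: local extrema are exactly the points where np.diff changes sign, which comparing asign against np.roll(asign, 1) exposes.

import numpy as np

data = np.array([0.0, 2.0, 1.0, 3.0, 3.5, 2.0, 4.0])
asign = np.sign(np.diff(data))                  # [ 1. -1.  1.  1. -1.  1.]
signchange = (np.roll(asign, 1) - asign) != 0   # True where the slope flips
signchange[0] = True                            # endpoints always count as extrema
signchange = np.append(signchange, True)
data[signchange]                                # array([0. , 2. , 1. , 3.5, 2. , 4. ])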
Project: pyshearlab    Author: stefanloock    | project source | file source
def SLdshear(inputArray, k, axis):
    """
    Computes the discretized shearing operator for a given inputArray, shear
    number k and axis.

    This version is adapted such that the MATLAB indexing can be used here in the
    Python version.
    """
    axis = axis - 1
    if k==0:
        return inputArray
    rows = np.asarray(inputArray.shape)[0]
    cols = np.asarray(inputArray.shape)[1]

    shearedArray = np.zeros((rows, cols), dtype=inputArray.dtype)

    if axis == 0:
        for col in range(cols):
            shearedArray[:,col] = np.roll(inputArray[:,col], int(k * np.floor(cols/2-col)))
    else:
        for row in range(rows):
            shearedArray[row,:] = np.roll(inputArray[row,:], int(k * np.floor(rows/2-row)))
    return shearedArray
Project: deepdream    Author: martinkaptein    | project source | file source
def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):

#function BAK def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
Project: deepdream    Author: martinkaptein    | project source | file source
def make_step(net, step_size=1.5, end='inception_4d/output', jitter=32, clip=True, objective=objective_L2):

#function BAK def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
Project: deepdream    Author: martinkaptein    | project source | file source
def make_step(net, step_size=1.5, end='inception_5a/output', jitter=32, clip=False, objective=objective_L2):

#function BAK def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
Project: Sverchok    Author: Sverchok    | project source | file source
def torus_faces(x, y):
    faces = np.empty((x * y, 4), dtype=np.uint32)
    tmp = np.arange(0, x * y)
    faces[:, 0] = tmp
    faces[:, 1] = np.roll(tmp, -y)
    tmp += 1
    tmp.shape = (x, y)
    tmp[:, y - 1] -= y
    tmp.shape = -1
    faces[:, 3] = tmp
    faces[:, 2] = np.roll(tmp, -y)
    faces.shape = -1
    l_total = np.empty(x * y, dtype=np.uint32)
    l_total[:] = 4
    l_start = np.arange(0, (x * y) * 4, 4, dtype=np.uint32)
    return SvPolygon(l_start, l_total, faces)
Project: semantic-segmentation    Author: albertbuchard    | project source | file source
def fft_convolve(X,Y, inv = 0):

    XF = np.fft.rfft2(X)
    YF = np.fft.rfft2(Y)
#    YF0 = np.copy(YF)
#    YF.imag = 0
#    XF.imag = 0
    if inv == 1:
 #       plt.imshow(np.real(YF)); plt.colorbar(); plt.show()
        YF = np.conj(YF)

    SF = XF*YF

    S = np.fft.irfft2(SF)
    n1,n2 = np.shape(S)

    S = np.roll(S,-n1//2+1,axis = 0)
    S = np.roll(S,-n2//2+1,axis = 1)

    return np.real(S)
Project: mic_array    Author: respeaker    | project source | file source
def _wakeup(self, direction=0):
        position = int((direction + 15) / 30) % 12

        basis = numpy.roll(self.basis, position * 4)
        for i in range(1, 25):
            pixels = basis * i
            self.write(pixels)
            time.sleep(0.005)

        pixels =  numpy.roll(pixels, 4)
        self.write(pixels)
        time.sleep(0.1)

        for i in range(2):
            new_pixels = numpy.roll(pixels, 4)
            self.write(new_pixels * 0.5 + pixels)
            pixels = new_pixels
            time.sleep(0.1)

        self.write(pixels)
        self.pixels = pixels
Project: mic_array    Author: respeaker    | project source | file source
def _think(self):
        pixels = self.pixels

        self.next.clear()
        while not self.next.is_set():
            pixels = numpy.roll(pixels, 4)
            self.write(pixels)
            time.sleep(0.2)

        t = 0.1
        for i in range(0, 5):
            pixels = numpy.roll(pixels, 4)
            self.write(pixels * (4 - i) / 4)
            time.sleep(t)
            t /= 2

        # time.sleep(0.5)

        self.pixels = pixels
Project: ngraph    Author: NervanaSystems    | project source | file source
def test_rolling_window(input_seq, batch_size, seq_len, strides):
    # This test checks if the rolling window works
    # We check if the first two samples in each batch are strided by strides

    # Truncate the input sequence so that the last section that doesn't fit into a batch
    # is thrown away
    input_seq = input_seq[:seq_len * batch_size * (len(input_seq) // seq_len // batch_size)]
    data_array = {'X': input_seq,
                  'y': np.roll(input_seq, axis=0, shift=-1)}
    time_steps = seq_len
    it_array = SequentialArrayIterator(data_arrays=data_array, time_steps=time_steps,
                                       stride=strides, batch_size=batch_size, tgt_key='y',
                                       shuffle=False)
    for idx, iter_val in enumerate(it_array):
        # Start of the array needs to be time_steps * idx
        assert np.array_equal(iter_val['X'][0, strides:time_steps],
                              iter_val['X'][1, :time_steps - strides])
        assert np.array_equal(iter_val['y'][0, strides:time_steps],
                              iter_val['y'][1, :time_steps - strides])
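The target construction above is the usual next-step-prediction trick: rolling the input left by one makes y[t] equal x[t+1], with the final element wrapping around to the end. A one-line illustration:

import numpy as np

x = np.arange(8)
y = np.roll(x, shift=-1, axis=0)
# x: [0 1 2 3 4 5 6 7]
# y: [1 2 3 4 5 6 7 0]   -- y[t] == x[t + 1], last entry wraps around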
Project: quadpy    Author: nschloe    | project source | file source
def pm_roll(n, v):
    '''Returns `2**k * n` points of dimension `n` such that

    p[0] = [+-v[0], ..., +-v[k], 0, ..., 0]
    p[1] = [0, +-v[0], ..., +-v[k], 0, ..., 0]
    ...
    p[n-1] = [+-v[1], ..., +-v[k], 0, ..., 0, +-v[0]]

    with all +- configurations.
    '''
    k = len(v)
    assert k <= n

    pm_v = pm_array(v)

    r0 = numpy.zeros((len(pm_v), n), dtype=pm_v.dtype)
    r0[:, :k] = pm_v

    return numpy.concatenate([
        numpy.roll(r0, i, axis=1)
        for i in range(n)
        ])
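A self-contained sketch of the pattern described in the docstring, using a simple stand-in for the pm_array helper (which is defined elsewhere in the project, so the version below is an assumption): every +- sign combination of v is placed in the leading coordinates, and np.roll then moves that block through all n coordinate positions.

import itertools
import numpy as np

def pm_array(v):
    # all +/- sign combinations of v (2**len(v) rows); hypothetical stand-in helper
    signs = np.array(list(itertools.product([+1, -1], repeat=len(v))))
    return signs * np.asarray(v, dtype=float)

n, v = 3, [2.0]
pm_v = pm_array(v)                        # [[ 2.], [-2.]]
r0 = np.zeros((len(pm_v), n))
r0[:, :len(v)] = pm_v
points = np.concatenate([np.roll(r0, i, axis=1) for i in range(n)])
# six points: (+-2, 0, 0), (0, +-2, 0), (0, 0, +-2)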


# TODO remove
Project: tensorpac    Author: EtienneCmb    | project source | file source
def time_lag(pha, amp, axis):
    """Introduce a time lag on phase series..

    Parameters
    ----------
    pha : array_like
        Array of phases of shapes (npha, ..., npts)

    amp : array_like
        Array of amplitudes of shapes (namp, ..., npts)

    axis : int
        Location of the time axis.

    Returns
    -------
    pha : array_like
        Shiffted version of phases of shapes (npha, ..., npts)

    amp : array_like
        Original version of amplitudes of shapes (namp, ..., npts)
    """
    npts = pha.shape[-1]
    return np.roll(pha, np.random.randint(npts), axis=axis), amp
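A tiny usage sketch of the surrogate idea above, with invented phase and amplitude arrays: rolling the phase series by a random number of samples destroys any genuine phase-amplitude coupling while leaving both signals otherwise intact.

import numpy as np

npts = 1000
pha = np.random.uniform(-np.pi, np.pi, size=(1, npts))   # (npha, npts) phase series
amp = np.random.rand(1, npts)                            # (namp, npts) amplitude series
pha_shifted = np.roll(pha, np.random.randint(npts), axis=-1)   # amplitudes stay untouched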
Project: 4mics_hat    Author: respeaker    | project source | file source
def wakeup(self, direction=0):
        position = int((direction + 15) / 30) % 12

        basis = numpy.roll(self.basis, position * 4)
        for i in range(1, 25):
            pixels = basis * i
            self.show(pixels)
            time.sleep(0.005)

        pixels =  numpy.roll(pixels, 4)
        self.show(pixels)
        time.sleep(0.1)

        for i in range(2):
            new_pixels = numpy.roll(pixels, 4)
            self.show(new_pixels * 0.5 + pixels)
            pixels = new_pixels
            time.sleep(0.1)

        self.show(pixels)
        self.pixels = pixels
Project: 4mics_hat    Author: respeaker    | project source | file source
def think(self):
        pixels = self.pixels

        while not self.stop:
            pixels = numpy.roll(pixels, 4)
            self.show(pixels)
            time.sleep(0.2)

        t = 0.1
        for i in range(0, 5):
            pixels = numpy.roll(pixels, 4)
            self.show(pixels * (4 - i) / 4)
            time.sleep(t)
            t /= 2

        self.pixels = pixels
Project: TensorFlow-Machine-Learning-Cookbook    Author: PacktPublishing    | project source | file source
def calc_grad_tiled(img, t_grad, tile_size=512):
    '''Compute the value of tensor t_grad over the image in a tiled way.
    Random shifts are applied to the image to blur tile boundaries over 
    multiple iterations.'''
    # Pick a subregion square size
    sz = tile_size
    # Get the image height and width
    h, w = img.shape[:2]
    # Get a random shift amount in the x and y direction
    sx, sy = np.random.randint(sz, size=2)
    # Randomly shift the image (roll image) in the x and y directions
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
    # Initialize the whole image gradient as zeros
    grad = np.zeros_like(img)
    # Now we loop through all the sub-tiles in the image
    for y in range(0, max(h-sz//2, sz),sz):
        for x in range(0, max(w-sz//2, sz),sz):
            # Select the sub image tile
            sub = img_shift[y:y+sz,x:x+sz]
            # Calculate the gradient for the tile
            g = sess.run(t_grad, {t_input:sub})
            # Apply the gradient of the tile to the whole image gradient
            grad[y:y+sz,x:x+sz] = g
    # Return the gradient, undoing the roll operation
    return np.roll(np.roll(grad, -sx, 1), -sy, 0)
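The roll/unroll pair at the top and bottom of calc_grad_tiled is an exact inverse; a numpy-only sketch of that symmetry (the per-tile TensorFlow gradient call is omitted):

import numpy as np

img = np.random.rand(96, 128, 3)
sx, sy = np.random.randint(512, size=2)

img_shift = np.roll(np.roll(img, sx, 1), sy, 0)          # random shift before tiling
# ... per-tile gradients would be computed on img_shift here ...
restored = np.roll(np.roll(img_shift, -sx, 1), -sy, 0)   # undo the shift
assert np.array_equal(restored, img)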
Project: nelpy    Author: nelpy    | project source | file source
def column_cycle_array(posterior, amt=None):
    """Also called 'position cycle' by Kloosterman et al.
    If amt is an array of the same length as posterior, then
    cycle each column by the corresponding amount in amt.
    Otherwise, cycle each column by a random amount."""
    out = copy.deepcopy(posterior)
    rows, cols = posterior.shape

    if amt is None:
        for col in range(cols):
            if np.isnan(np.sum(posterior[:,col])):
                continue
            else:
                out[:,col] = np.roll(posterior[:,col], np.random.randint(1, rows))
    else:
        if len(amt) == cols:
            for col in range(cols):
                if np.isnan(np.sum(posterior[:,col])):
                    continue
                else:
                    out[:,col] = np.roll(posterior[:,col], int(amt[col]))
        else:
            raise TypeError("amt does not seem to be the correct shape!")
    return out
Project: nelpy    Author: nelpy    | project source | file source
def _within_event_incoherent_shuffle(self, kind='train'):
        """Time cycle on BinnedSpikeTrainArray, cycling only within each epoch.
        We cycle each unit independently, within each epoch.
        """
        if kind == 'train':
            bst = self.PBEs_train
        elif kind == 'test':
            bst = self.PBEs_test
        else:
            raise ValueError("kind '{}' not understood!".format(kind))

        out = copy.deepcopy(bst) # should this be deep?
        data = out._data
        edges = np.insert(np.cumsum(bst.lengths),0,0)

        for uu in range(bst.n_units):
            for ii in range(bst.n_epochs):
                segment = np.squeeze(data[uu, edges[ii]:edges[ii+1]])
                segment = np.roll(segment, np.random.randint(len(segment)))
                data[uu, edges[ii]:edges[ii+1]] = segment

        if kind == 'train':
            self.PBEs_train = out
        else:
            self.PBEs_test = out
Project: Personal_AI_Assistant    Author: PratylenClub    | project source | file source
def _augment_speech(mfcc):

    # random frequency shift ( == speed perturbation effect on MFCC )
    r = np.random.randint(-2, 2)

    # shifting mfcc
    mfcc = np.roll(mfcc, r, axis=0)

    # zero padding
    if r > 0:
        mfcc[:r, :] = 0
    elif r < 0:
        mfcc[r:, :] = 0

    return mfcc
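A short sketch of the shift-and-zero-pad pattern above, for a positive shift: unlike a bare np.roll, the rows that wrap around are zeroed out, so the augmentation acts like a translation rather than a circular shift.

import numpy as np

mfcc = np.arange(15, dtype=float).reshape(5, 3)   # toy (frequency, time) matrix
r = 2
shifted = np.roll(mfcc, r, axis=0)                # last two rows wrap to the top
shifted[:r, :] = 0                                # zero them out instead of wrapping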


# Speech Corpus
Project: quickshear    Author: nipy    | project source | file source
def edge_mask(mask):
    """ Find the edges of a mask or masked image

    Parameters
    ----------
    mask : 3D array
        Binary mask (or masked image) with axis orientation LPS or RPS, and the
        non-brain region set to 0

    Returns
    -------
    2D array
        Outline of sagittal profile (PS orientation) of mask
    """
    # Sagittal profile
    brain = mask.any(axis=0)

    # Simple edge detection
    edgemask = 4 * brain - np.roll(brain, 1, 0) - np.roll(brain, -1, 0) - \
                           np.roll(brain, 1, 1) - np.roll(brain, -1, 1) != 0
    return edgemask.astype('uint8')
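A small 2-D sketch of the neighbour-roll edge detector above: for a binary mask, 4 times a pixel minus its four rolled neighbours is non-zero exactly when the pixel differs from at least one 4-neighbour.

import numpy as np

brain = np.zeros((7, 7), dtype=int)
brain[2:5, 2:5] = 1                               # a filled 3x3 square

edgemask = (4 * brain
            - np.roll(brain, 1, 0) - np.roll(brain, -1, 0)
            - np.roll(brain, 1, 1) - np.roll(brain, -1, 1)) != 0
# edgemask is True on the square's outline and on the one-pixel ring just outside it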
Project: sporco    Author: bwohlberg    | project source | file source
def roll(u, shift):
    """
    Apply :func:`numpy.roll` to multiple array axes.

    Parameters
    ----------
    u : array_like
      Input array
    shift : array_like of int
      Shifts to apply to axes of input `u`

    Returns
    -------
    v : ndarray
      Output array
    """

    v = u.copy()
    for k in range(len(shift)):
        v = np.roll(v, shift[k], axis=k)
    return v
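A quick usage note for the helper above: one shift per axis rolls the array along every axis in turn, so for a 2-D input it matches two chained np.roll calls (recent numpy can also do this in a single call with tuple arguments).

import numpy as np

u = np.arange(12).reshape(3, 4)
v = roll(u, (1, 2))                                 # the helper defined above
w = np.roll(np.roll(u, 1, axis=0), 2, axis=1)       # equivalent chained rolls
assert np.array_equal(v, w)
assert np.array_equal(v, np.roll(u, (1, 2), axis=(0, 1)))   # numpy >= 1.12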
Project: importance-sampling    Author: idiap    | project source | file source
def update(self, idxs, x):
        # Fetch the classes for the regression
        _, y = self.dataset.train_data[idxs]

        # If we are doing the regression in logspace
        if self.log:
            x = np.log(x)

        # Train the lstm so that it can predict x given the history
        self.model.train_on_batch([self.history[idxs], self._to_ids(y)], x)

        # Update the history to include x
        full = idxs[self.cnts[idxs] == self.history.shape[1]]
        self.history[full] = np.roll(self.history[full], -1, axis=1)
        self.cnts[full] -= 1
        self.history[idxs, self.cnts[idxs], :1] = x
        self.cnts[idxs] += 1
Project: PySAT    Author: USGS-Astrogeology    | project source | file source
def uwt_align_h2(X, inverse=False):
    """UWT h2 coefficients aligment.

    If inverse = True performs the misalignment
    for a correct reconstruction.
    """

    J = X.shape[0] / 2
    shifts = np.asarray([2 ** j for j in range(J)])

    if not inverse:
        shifts *= -1

    for j in range(J):
        X[j] = np.roll(X[j], shifts[j])
        X[j + J] = np.roll(X[j + J], shifts[j])
Project: PySAT    Author: USGS-Astrogeology    | project source | file source
def uwt_align_d4(X, inverse=False):
    """UWT d4 coefficients aligment.

    If inverse = True performs the misalignment
    for a correct reconstruction.
    """
    J = X.shape[0] / 2
    w_shifts = np.asarray([(3 * 2 ** j) - 1 for j in range(J)])
    v_shifts = np.asarray([1] + [(2 ** (j + 1) - 1) for j in range(1, J)])

    if not inverse:
        w_shifts *= -1
        v_shifts *= -1

    for j in range(J):
        X[j] = np.roll(X[j], w_shifts[j])
        X[j + J] = np.roll(X[j + J], v_shifts[j])
Project: Fluid2d    Author: pvthinker    | project source | file source
def finalize(self,mskp_model):
        print('found %i islands'%self.nbisland)
        mskp = zeros((self.nyl,self.nxl),dtype=int8)
        work = zeros((self.nyl,self.nxl))
        mskr = zeros((self.nyl,self.nxl))
        for k in range(self.nbisland):
            idx  = self.data[k]['idx']
            psi0 = self.data[k]['psi0']
            mskr[:,:]=1.
            mskp[:,:]=0
            mskr[idx]=0.
            celltocorner(mskr,work)
            mskp[work==1]=1
            mskp=1-mskp


            vort = (roll(mskp,-1,axis=1)+roll(mskp,-1,axis=0)
                   +roll(mskp,+1,axis=1)+roll(mskp,+1,axis=0) )

            z=(vort)*psi0/self.dx**2#*(1-mskp)
            self.rhsp[vort>0] = z[vort>0]
            self.psi[mskp==1]=psi0
#            print(self.psi[:,10])
        print('island are ok')
Project: catchy    Author: jvbalen    | project source | file source
def to_intervals(X):

    def _roll_rows(x):
        """ Circularly shift ('roll') rows i in array by -i, recursively.
        If 2d-array: circularly shift each row i to the left, i times so that
            X(i, j-i) = X(i, j)
        If 3d-array (or 4d, 5d..):
            X(i, j-i, k-j) = X(i, j, k)
        """
        if len(x.shape) > 2:
            x = np.array([_roll_rows(xi) for xi in x])
        elif len(x.shape) == 1:
            raise ValueError('Method requires nd-array with n >= 2.')
        x_rolled = np.array([np.roll(xi, -i, axis=0) for i, xi in enumerate(x)])
        return x_rolled

    X_rolled = _roll_rows(X)

    X_inv = np.sum(X_rolled, axis=0)

    return X_inv
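A tiny sketch of the row-rolling step above: row i is rolled left by i, so the entry that sat at column j of row i ends up at column j - i; summing over rows then aggregates values by the interval j - i rather than by absolute position.

import numpy as np

X = np.array([[10, 11, 12, 13],
              [20, 21, 22, 23],
              [30, 31, 32, 33]])
X_rolled = np.array([np.roll(xi, -i, axis=0) for i, xi in enumerate(X)])
# [[10 11 12 13]
#  [21 22 23 20]
#  [32 33 30 31]]
X_inv = np.sum(X_rolled, axis=0)   # [63 66 65 64]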


## ------------------------- feature alignment
Project: Splipy    Author: sintefmath    | project source | file source
def lower_periodic(self, periodic, direction=0):
        """  Sets the periodicity of the spline object in the given direction,
        keeping the geometry unchanged.

        :param int periodic: new periodicity, i.e. the basis is C^k over the start/end
        :param int direction: the parametric direction of the basis to modify
        :return: self
        """
        direction = check_direction(direction, self.pardim)

        b  = self.bases[direction]
        while periodic < b.periodic:
            self.insert_knot(self.start(direction), direction)
            self.controlpoints = np.roll(self.controlpoints, -1, direction)
            b.roll(1)
            b.periodic -= 1
            b.knots = b.knots[:-1]
        if periodic > b.periodic:
            raise ValueError('Cannot raise periodicity')

        return self
Project: cebl    Author: idfah    | project source | file source
def sharpenOld(s, kernelFunc, dist=None, scale=None,
            normalize=False, m1=False, *args, **kwargs):
    s = util.colmat(s)

    if dist is None:
        dist = np.arange(s.shape[1])+1.0
        dist = np.abs(dist[None,:]-dist[:,None])

        #dist = np.insert(spsig.triang(s.shape[1]-1, sym=False), 0, 0.0)
        #dist = np.vstack([np.roll(dist, i) for i in xrange(dist.size)])

    if scale is None:
        # minimum off-diagonal distance
        scale = np.min(dist[np.asarray(1.0-np.eye(dist.shape[0]), dtype=bool)])

    kernel = kernelFunc(dist.T/scale, *args, **kwargs)

    if m1:
        np.fill_diagonal(kernel, 0.0)

    if normalize:
        kernel = kernel/np.abs(kernel.sum(axis=0))

    return s - s.dot(kernel)
Project: blur    Author: ajyoon    | project source | file source
def get_samples(self, sample_count):
        """
        Fetch a number of samples from self.wave_cache

        Args:
            sample_count (int): Number of samples to fetch

        Returns: ndarray
        """
        if self.amplitude.value <= 0:
            return None
        # Build samples by rolling the period cache through the buffer
        rolled_array = numpy.roll(self.wave_cache,
                                  -1 * self.last_played_sample)
        # Append remaining partial period
        full_count, remainder = divmod(sample_count, self.cache_length)
        final_subarray = rolled_array[:int(remainder)]
        return_array = numpy.concatenate((numpy.tile(rolled_array, full_count),
                                          final_subarray))
        # Keep track of where we left off to prevent popping between chunks
        self.last_played_sample = int(((self.last_played_sample + remainder) %
                                       self.cache_length))
        # Multiply output by amplitude
        return return_array * (self.amplitude.value *
                               self.amplitude_multiplier)
Project: information-bottleneck    Author: djstrouse    | project source | file source
def gen_blurred_diag_pxy(s):
    X = 1024
    Y = X

    # generate pdf
    from scipy.stats import multivariate_normal
    pxy = np.zeros((X,Y))
    rv = multivariate_normal(cov=s)
    for x in range(X):        
        pxy[x,:] = np.roll(rv.pdf(np.linspace(-X/2,X/2,X+1)[:-1]),int(X/2+x))
    pxy = pxy/np.sum(pxy)

    # plot p(x,y)
    import matplotlib.pyplot as plt
    plt.figure()
    plt.contourf(pxy)
    plt.ion()
    plt.title("p(x,y)")
    plt.show()

    return pxy
Project: zipline-chinese    Author: zhanghan1990    | project source | file source
def _roll_data(self):
        """
        Roll window worth of data up to position zero.
        Save the effort of having to expensively roll at each iteration
        """

        self.buffer.values[:, :self._window, :] = \
            self.buffer.values[:, -self._window:, :]
        self.date_buf[:self._window] = self.date_buf[-self._window:]
        self._pos = self._window
Project: pi_gcs    Author: lbusoni    | project source | file source
def setSinusoidalWaveform(self,
                              waveTableId,
                              append,
                              lengthInPoints,
                              amplitudeOfTheSineCurve,
                              offsetOfTheSineCurve,
                              wavelengthOfTheSineCurveInPoints,
                              startPoint,
                              curveCenterPoint):
        '''
        See description of PI_WAV_SIN_P in PI GCS 2.0 DLL doc
        '''
        curveCenterPoint= int(round(curveCenterPoint))
        wavelengthOfTheSineCurveInPoints= \
            int(round(wavelengthOfTheSineCurveInPoints))
        startPoint= int(round(startPoint))
        lengthInPoints= int(round(lengthInPoints))
        assert append == WaveformGenerator.CLEAR, 'only CLEAR implemented'
        assert startPoint >= 0
        assert startPoint < lengthInPoints
        assert curveCenterPoint >= 0
        assert startPoint + curveCenterPoint < lengthInPoints

        ccUp= 0.5* curveCenterPoint
        rampUp= 0.5 * amplitudeOfTheSineCurve* (1 + np.sin(
            np.arange(-ccUp, ccUp) / ccUp * np.pi / 2))
        ccDown= 0.5* (wavelengthOfTheSineCurveInPoints - curveCenterPoint)
        rampDown= 0.5 * amplitudeOfTheSineCurve* (1 - np.sin(
            np.arange(-ccDown, ccDown) / ccDown * np.pi / 2))
        waveform= np.zeros(lengthInPoints) + offsetOfTheSineCurve
        waveform[0: curveCenterPoint]= offsetOfTheSineCurve + rampUp
        waveform[curveCenterPoint: wavelengthOfTheSineCurveInPoints]= \
            offsetOfTheSineCurve + rampDown
        waveform= np.roll(waveform, startPoint)
        self._waveform[waveTableId]= waveform
Project: pybot    Author: spillai    | project source | file source
def publish_sensor_frame(self, channel, pose=None): 
        """ 
        Publish the sensor frame that the point clouds
        are drawn with reference to. sensor_frame_msg.id is hashed
        from its channel (collisions are possible since it is right-shifted by 32)
        """
        # Sensor frames msg
        msg = vs.obj_collection_t()
        msg.id = self.channel_uid(channel)
        msg.name = 'BOTFRAME_' + channel
        msg.type = vs.obj_collection_t.AXIS3D
        msg.reset = True

        # Send sensor pose
        pose_msg = vs.obj_t()
        roll, pitch, yaw, x, y, z = pose.to_rpyxyz(axes='sxyz')
        pose_msg.id = 0
        pose_msg.x, pose_msg.y, pose_msg.z, \
            pose_msg.roll, pose_msg.pitch, pose_msg.yaw  = x, y, z, roll, pitch, yaw

        # Save pose
        self.set_sensor_pose(channel, pose)

        msg.objs = [pose_msg]
        msg.nobjs = len(msg.objs)
        self.lc.publish("OBJ_COLLECTION", msg.encode())
Project: pybot    Author: spillai    | project source | file source
def corners_to_edges(corners):
    """ Edges are represented in N x 6 form """
    return np.hstack([corners, np.roll(corners, 1, axis=0)])
Project: pybot    Author: spillai    | project source | file source
def interpolate(self, other, this_weight):
        q0, q1 = np.roll(self.q, shift=1), np.roll(other.q, shift=1)
        u = 1 - this_weight
        assert(u >= 0 and u <= 1)
        cos_omega = np.dot(q0, q1)

        if cos_omega < 0:
            result = -q0[:]
            cos_omega = -cos_omega
        else:
            result = q0[:]

        cos_omega = min(cos_omega, 1)

        omega = math.acos(cos_omega)
        sin_omega = math.sin(omega)
        a = math.sin((1-u) * omega)/ sin_omega
        b = math.sin(u * omega) / sin_omega

        if abs(sin_omega) < 1e-6:
            # direct linear interpolation for numerically unstable regions
            result = result * this_weight + q1 * u
            result /= math.sqrt(np.dot(result, result))
        else:
            result = result*a + q1*b
        return Quaternion(np.roll(result, shift=-1))

    # To conversions
Project: pybot    Author: spillai    | project source | file source
def to_wxyz(self): 
        q = np.roll(self.q, shift=1)
        return q
Project: pybot    Author: spillai    | project source | file source
def from_wxyz(cls, q): 
        return cls(np.roll(q, shift=-1))
Project: pybot    Author: spillai    | project source | file source
def from_rpy (cls, roll, pitch, yaw, axes='rxyz'):
        """ Construct Quaternion from axis-angle representation """
        return cls(tf.quaternion_from_euler(roll, pitch, yaw, axes=axes))