Python numpy module: copy() example source code

We extracted the following code examples from open-source Python projects to illustrate how to use numpy.copy().
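Before the project excerpts, here is a minimal sketch (not taken from any of the projects below) of the property most of these snippets rely on: numpy.copy() returns a new array whose data is independent of the source, whereas plain assignment or slicing only creates another reference or a view.

import numpy as np

a = np.arange(5)
view = a[1:4]      # slicing gives a view that shares memory with a
b = np.copy(a)     # np.copy allocates a new, independent buffer

a[2] = 99
print(view)        # [ 1 99  3]  -> the view sees the change
print(b)           # [0 1 2 3 4] -> the copy does not

As the radar test_order example further down shows, np.copy also preserves the memory layout (C or Fortran order) of its input by default.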

Project: SelfDrivingCar    Author: aguijarro
def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                                     [img_size[0]-offset, img_size[1]-offset],
                                     [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M
Project: radar    Author: amoose136
def test_copyto_fromscalar():
    a = np.arange(6, dtype='f4').reshape(2, 3)

    # Simple copy
    np.copyto(a, 1.5)
    assert_equal(a, 1.5)
    np.copyto(a.T, 2.5)
    assert_equal(a, 2.5)

    # Where-masked copy
    mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(a, 3.5, where=mask)
    assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(a.T, 4.5, where=mask)
    assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
Project: AutoML5    Author: djajetic
def normalize_array (solution, prediction):
    ''' Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. 
    This allows applying classification scores to all cases.
    In principle, this should not do anything to properly formatted 
    classification inputs and outputs.'''
    # Binarize solution
    sol=np.ravel(solution) # convert to 1-d array
    maxi = np.nanmax((filter(lambda x: x != float('inf'), sol))) # Max except NaN and Inf
    mini = np.nanmin((filter(lambda x: x != float('-inf'), sol))) # Min except NaN and Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini)/2.
    new_solution = np.copy(solution)
    new_solution[solution>=mid] = 1
    new_solution[solution<mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini))/float(diff)
    new_prediction[new_prediction>1] = 1 # and if predictions exceed the bounds [0, 1]
    new_prediction[new_prediction<0] = 0
    # Make probabilities smoother
    #new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
Project: onto-lstm    Author: pdasigi
def _factor_target_indices(self, Y_inds, vocab_size=None, base=2):
    if vocab_size is None:
      vocab_size = len(self.dp.word_index)
    print >>sys.stderr, "Factoring targets of vocabulary size: %d"%(vocab_size)
    num_vecs = int(math.ceil(math.log(vocab_size)/math.log(base))) + 1
    base_inds = []
    div_Y_inds = Y_inds
    print >>sys.stderr, "Number of factors: %d"%num_vecs
    for i in range(num_vecs):
      new_inds = div_Y_inds % base
      if i == num_vecs - 1:
        if new_inds.sum() == 0:
          # Most significant "digit" is a zero. Omit it.
          break
      base_inds.append(new_inds)
      div_Y_inds = numpy.copy(div_Y_inds/base)
    base_vecs = [self._make_one_hot(base_inds_i, base) for base_inds_i in base_inds]
    return base_vecs
Project: kaggle_dsb2017    Author: astoc
def get_masks(scans,masks_list):
    #%matplotlib inline
    scans1=scans.copy()
    maxv=255
    masks=np.zeros(shape=(scans.shape[0],1,img_rows,img_cols))
    for i_m in range(len(masks_list)):
        for i in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
            for j in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
                masks[masks_list[i_m][0],0,masks_list[i_m][2]+i,masks_list[i_m][1]+j]=1
        for i1 in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]+masks_list[i_m][3]]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]-masks_list[i_m][3]]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]-masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
    for i in range(scans.shape[0]):
        print ('scan '+str(i))
        f, ax = plt.subplots(1, 2,figsize=(10,5))
        ax[0].imshow(scans1[i,0,:,:],cmap=plt.cm.gray)
        ax[1].imshow(masks[i,0,:,:],cmap=plt.cm.gray)
        plt.show()
    return(masks)
Project: Homology_BG    Author: jyotikab
def postProcess(PDFeatures1,which):
        PDFeatures2 = np.copy(PDFeatures1)
        cols = np.shape(PDFeatures2)[1]
        for x in xrange(cols):
                indinf = np.where(np.isinf(PDFeatures2[:,x])==True)[0]
                if len(indinf) > 0:
                        PDFeatures2[indinf,x] = 0
                indnan = np.where(np.isnan(PDFeatures2[:,x])==True)[0]
                if len(indnan) > 0:
                        PDFeatures2[indnan,x] = 0

        indLN = np.where(PDFeatures2[:,0] < -1)[0]
        for x in indLN:
                PDFeatures2[x,0] = np.random.uniform(-0.75,-0.99,1)

        term1 = (PDFeatures2[:,2]+PDFeatures2[:,3]+PDFeatures2[:,5])/3.
        print term1

        PDFeatures2[:,1] = 1.-term1
        print "PDF",PDFeatures2[:,1]
        return PDFeatures2
Project: hip-mdp-public    Author: dtak
def pop(self):
        """
        Removes and returns [priority, exp_idx] for the
        maximum priority element
        """
        if self.size == 0:
            return None
        # Get max element (first element in pq_array)
        max_elt = np.copy(self.pq_array[0])
        # Move the last value (not necessarily the smallest) to the root
        self.pq_array[0] = self.pq_array[self.size-1]
        self.size -= 1
        # Update hash tables
        self.exp_hash[self.pq_array[0,1]], self.pq_hash[0] = 0, self.pq_array[0,1]
        # Rebalance
        self.__down_heap(0)
        return max_elt
Project: hip-mdp-public    Author: dtak
def __down_heap(self, i):
        """
        Rebalances the heap (by moving small values down)
        """
        # Calculate left and right child indices
        l = 2*i+1
        r = 2*i+2
        # Find index of the greatest of these elements
        if l < self.size and self.pq_array[l,0] > self.pq_array[i,0]:
            greatest = l
        else:
            greatest = i
        if r < self.size and self.pq_array[r,0] > self.pq_array[greatest,0]:
            greatest = r
        # Continue rebalancing if necessary
        if greatest != i:
            # swap elements at indices i, greatest
            self.pq_array[i], self.pq_array[greatest] = np.copy(self.pq_array[greatest]), np.copy(self.pq_array[i])
            # Update hash tables
            self.exp_hash[self.pq_array[i,1]], self.exp_hash[self.pq_array[greatest,1]], self.pq_hash[i], self.pq_hash[greatest] = i, greatest, self.pq_array[i,1], self.pq_array[greatest,1]

            self.__down_heap(greatest)
Project: demcoreg    Author: dshean
def apply_xy_shift(ds, xshift_m, yshift_m):
    """
    Apply horizontal shift to GDAL dataset GeoTransform

    Returns:
    GDAL Dataset copy with updated GeoTransform
    """
    print("X shift: ", xshift_m)
    print("Y shift: ", yshift_m)

    #Update geotransform
    gt_orig = ds.GetGeoTransform()
    gt_shift = np.copy(gt_orig)
    gt_shift[0] += xshift_m
    gt_shift[3] += yshift_m

    print("Original geotransform:", gt_orig)
    print("Updated geotransform:", gt_shift)

    #Update ds Geotransform
    ds_align = iolib.mem_drv.CreateCopy('', ds, 1)
    ds_align.SetGeoTransform(gt_shift)
    return ds_align
Project: brain_segmentation    Author: Ryo-Ito
def load_nifti(filename, with_affine=False):
    """
    load image from NIFTI file
    Parameters
    ----------
    filename : str
        filename of NIFTI file
    with_affine : bool
        if True, returns affine parameters

    Returns
    -------
    data : np.ndarray
        image data
    """
    img = nib.load(filename)
    data = img.get_data()
    data = np.copy(data, order="C")
    if with_affine:
        return data, img.affine
    return data
Project: qcqp    Author: cvxgrp
def admm_phase1(x0, prob, tol=1e-2, num_iters=1000):
    logging.info("Starting ADMM phase 1 with tol %.3f", tol)

    z = np.copy(x0)
    xs = [np.copy(x0) for i in range(prob.m)]
    us = [np.zeros(prob.n) for i in range(prob.m)]

    for t in range(num_iters):
        if max(prob.violations(z)) < tol:
            break
        z = (sum(xs)-sum(us))/prob.m
        for i in range(prob.m):
            x, u, f = xs[i], us[i], prob.fi(i)
            xs[i] = onecons_qcqp(z + u, f)
        for i in range(prob.m):
            us[i] += z - xs[i]

    return z
Project: bayestsa    Author: thalesians
def analyseparamsneighbourhood(svdata, params, includejumps, randomstate):
    parameterndarray = transformparameterndarray(np.array(params), includejumps)
    offsets = np.linspace(-.5, .5, 10)
    for dimension in range(params.dimensioncount):
        xs, ys = [], []
        parametername = params.getdimensionname(dimension)
        print('Perturbing %s...' % parametername)
        for offset in offsets:
            newparameterndarray = np.copy(parameterndarray)
            newparameterndarray[dimension] += offset
            xs.append(inversetransformparameterndarray(newparameterndarray, includejumps)[dimension])
            y = runsvljparticlefilter(svdata, sv.Params(*inversetransformparameterndarray(newparameterndarray, includejumps)), randomstate).stochfilter.loglikelihood
            ys.append(y)
        fig = plt.figure()
        plot = fig.add_subplot(111)
        plot.plot(xs, ys)
        plot.axvline(x=inversetransformparameterndarray(parameterndarray, includejumps)[dimension], color='red')
        plot.set_xlabel(parametername)
        plot.set_ylabel('loglikelihood')
        plt.show()
Project: DeepLearning_PlantDiseases    Author: MarkoArsenovic
def Occlusion_exp(image,occluding_size,occluding_stride,model,preprocess,classes,groundTruth):    
    img = np.copy(image)
    height, width,_= img.shape
    output_height = int(math.ceil((height-occluding_size)/occluding_stride+1))
    output_width = int(math.ceil((width-occluding_size)/occluding_stride+1))
    ocludedImages=[]
    for h in range(output_height):
        for w in range(output_width):
            #occluder region
            h_start = h*occluding_stride
            w_start = w*occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)

            input_image = copy.copy(img)
            input_image[h_start:h_end,w_start:w_end,:] =  0
            ocludedImages.append(preprocess(Image.fromarray(input_image)))

    L = np.empty(output_height*output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack([img for img in ocludedImages])
    dataset = torch.utils.data.TensorDataset(tensor_images,L) 
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=5,shuffle=False, num_workers=8) 

    heatmap=np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data

        if use_gpu:
            images, labels = (images.cuda()), (labels.cuda(async=True))

        outputs = model(Variable(images))
        m = nn.Softmax()
        outputs=m(outputs)
        if use_gpu:   
            outs=outputs.cpu()
        heatmap = np.concatenate((heatmap,outs[0:outs.size()[0],groundTruth].data.numpy()))

    return heatmap.reshape((output_height, output_width))
Project: dpl    Author: ppengtang
def _get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.

    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob

    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []
    for i in xrange(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))
    return rois_blob_real
Project: tfutils    Author: neuroailab
def time_hdf5():
    data_path = create_hdf5(BATCH_SIZE * NSTEPS)

    f = h5py.File(data_path)
    durs = []
    for step in tqdm.trange(NSTEPS, desc='running hdf5'):
        start_time = time.time()
        arr = f['data'][BATCH_SIZE * step: BATCH_SIZE * (step+1)]
        read_time = time.time()
        arr = copy.deepcopy(arr)
        copy_time = time.time()
        durs.append(['hdf5 read', step, read_time - start_time])
        durs.append(['hdf5 copy', step, copy_time - read_time])
    f.close()
    os.remove(data_path)
    durs = pandas.DataFrame(durs, columns=['kind', 'stepno', 'dur'])
    return durs
Project: autolab_core    Author: BerkeleyAutomation
def as_frames(self, from_frame, to_frame='world'):
        """Return a shallow copy of this rigid transform with just the frames
        changed.

        Parameters
        ----------
        from_frame : :obj:`str`
            The new from_frame.

        to_frame : :obj:`str`
            The new to_frame.

        Returns
        -------
        :obj:`RigidTransform`
            The RigidTransform with new frames.
        """
        return RigidTransform(self.rotation, self.translation, from_frame, to_frame)
Project: adversarial-frcnn    Author: xiaolonw
def generate_mask_rand(self, mask_pred):

        pool_len = mask_pred.shape[2]
        sample_num = mask_pred.shape[0]

        rand_mask = np.ones((sample_num, 1, pool_len, pool_len))
        mask_pixels = pool_len * pool_len
        count_drop_neg = self._count_drop_neg

        for i in range(sample_num):
            rp = np.random.permutation(np.arange(mask_pixels))
            rp = rp[0: count_drop_neg]

            now_mask = np.ones(mask_pixels)
            now_mask[rp] = 0 

            now_mask = np.reshape(now_mask, (pool_len, pool_len))
            rand_mask[i,0,:,:] = np.copy(now_mask)

        return rand_mask
Project: dsb3    Author: EliasVansteenkiste
def segment_HU_scan_frederic(x, threshold=-350):
    mask = np.copy(x)
    binary_part = mask > threshold
    selem1 = skimage.morphology.disk(8)
    selem2 = skimage.morphology.disk(2)
    selem3 = skimage.morphology.disk(13)

    for iz in xrange(mask.shape[0]):
        # fill the body part
        filled = scipy.ndimage.binary_fill_holes(binary_part[iz])  # fill body
        filled_borders_mask = skimage.morphology.binary_erosion(filled, selem1)
        mask[iz] *= filled_borders_mask


        mask[iz] = skimage.morphology.closing(mask[iz], selem2)
        mask[iz] = skimage.morphology.erosion(mask[iz], selem3)
        mask[iz] = mask[iz] < threshold

    return mask
Project: openanalysis    Author: OpenWeavers
def merge_alternative(self, array, low, mid, high):
        left = np.copy(array[low: mid + 1])
        right = np.copy(array[mid + 1: high + 1])
        i, j, k = 0, 0, low
        while i < len(left) and j < len(right):
            self.count += 1
            if left[i] < right[j]:
                array[k] = left[i]
                i += 1
            else:
                array[k] = right[j]
                j += 1
            k += 1
            if self.visualization:
                self.hist_array = np.vstack((self.hist_array, array))
        while i < len(left):
            array[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            array[k] = right[j]
            j += 1
            k += 1
Project: a3c-mujoco    Author: Feryal
def reset_target(self):
        # Randomize goal position within specified bounds
        self.goal = np.random.rand(3) * (self.target_bounds[:, 1] -
                                         self.target_bounds[:, 0]
                                         ) + self.target_bounds[:, 0]
        geom_positions = self.sim.model.geom_pos.copy()
        prev_goal_location = geom_positions[1]

        while (np.linalg.norm(prev_goal_location - self.goal) <
               self.target_reset_distance):
            self.goal = np.random.rand(3) * (self.target_bounds[:, 1] -
                                             self.target_bounds[:, 0]
                                             ) + self.target_bounds[:, 0]

        geom_positions[1] = self.goal
        self.sim.model.geom_pos[:] = geom_positions
Project: SNPmatch    Author: Gregor-Mendel-Institute
def parseGT(snpGT):
    first = snpGT[0]
    snpBinary = np.zeros(len(snpGT), dtype = "int8")
    if first.find('|') != -1:
        ## GT is phased
        separator = "|"
    elif first.find('/') != -1:
        ## GT is not phased
        separator = "/"
    elif np.char.isdigit(first):
        return np.array(np.copy(snpGT), dtype = "int8")
    else:
        die("unable to parse the format of GT in vcf!")
    hetGT = "0" + separator + "1"
    refGT = "0" + separator + "0"
    altGT = "1" + separator + "1"
    nocall = "." + separator + "."
    snpBinary[np.where(snpGT == altGT)[0]] = 1
    snpBinary[np.where(snpGT == hetGT)[0]] = 2
    snpBinary[np.where(snpGT == nocall)[0]] = -1
    return snpBinary
Project: kor-char-rnn-tensorflow    Author: insikk
def create_batches(self):
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))

        # When the data (tensor) is too small,
        # let's give them a better error message
        if self.num_batches == 0:
            assert False, "Not enough data. Make seq_length and batch_size small."

        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        xdata = self.tensor
        ydata = np.copy(self.tensor)
        ydata[:-1] = xdata[1:]
        ydata[-1] = xdata[0]
        self.x_batches = np.split(xdata.reshape(self.batch_size, -1),
                                  self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, -1),
                                  self.num_batches, 1)
Project: mixedvines    Author: asnelt
def logpdf(self, samples):
        '''
        Calculates the log of the probability density function.

        Parameters
        ----------
        samples : array_like
            n-by-2 matrix of samples where n is the number of samples.

        Returns
        -------
        vals : ndarray
            Log of the probability density function evaluated at `samples`.
        '''
        samples = np.copy(np.asarray(samples))
        samples = self.__rotate_input(samples)
        inner = np.all(np.bitwise_and(samples > 0.0, samples < 1.0), axis=1)
        outer = np.invert(inner)
        vals = np.zeros(samples.shape[0])
        vals[inner] = self._logpdf(samples[inner, :])
        # Assign zero mass to border
        vals[outer] = -np.inf
        return vals
Project: sketch_rnn_classification    Author: payalbajaj
def _get_batch_from_indices(self, indices):
    """Given a list of indices, return the potentially augmented batch."""
    x_batch = []
    seq_len = []
    x_labels = []
    for idx in range(len(indices)):
      i = indices[idx]
      data = self.random_scale(self.strokes[i])
      data_copy = np.copy(data)
      if self.augment_stroke_prob > 0:
        data_copy = augment_strokes(data_copy, self.augment_stroke_prob)
      x_batch.append(data_copy)
      length = len(data_copy)
      seq_len.append(length)
      x_labels.append(self.labels[i])
    seq_len = np.array(seq_len, dtype=int)
    # We return four things: the stroke-3 batch, labels, padded stroke-5 batch, and the list of seq_len.
    return x_batch, x_labels, self.pad_batch(x_batch, self.max_seq_length), seq_len
Project: PleioPred    Author: yiminghu
def pred_accuracy(y_true, y_pred):
    y_true = sp.copy(y_true)
    if len(sp.unique(y_true))==2:
        print 'dichotomous trait, calculating AUC'
        y_min = y_true.min()
        y_max = y_true.max()
        if y_min!= 0 or y_max!=1:
            y_true[y_true==y_min]=0
            y_true[y_true==y_max]=1
        fpr, tpr, thresholds = metrics.roc_curve(y_true, y_pred)
        auc = metrics.auc(fpr, tpr)
        return auc
    else:
        print 'continuous trait, calculating COR'
        cor = sp.corrcoef(y_true,y_pred)[0,1]
        return cor
Project: text2image    Author: emansim
def load_weights(params, path, num_conv):
    print 'Loading gan weights from ' + path
    with h5py.File(path, 'r') as hdf5:
        params['skipthought2image'] = theano.shared(np.copy(hdf5['skipthought2image']))
        params['skipthought2image-bias'] = theano.shared(np.copy(hdf5['skipthought2image-bias']))

        for i in xrange(num_conv):
            params['W_conv{}'.format(i)] = theano.shared(np.copy(hdf5['W_conv{}'.format(i)]))
            params['b_conv{}'.format(i)] = theano.shared(np.copy(hdf5['b_conv{}'.format(i)]))

            # Flip w,h axes
            params['W_conv{}'.format(i)] = params['W_conv{}'.format(i)][:,:,::-1,::-1]

            w = np.abs(np.copy(hdf5['W_conv{}'.format(i)]))
            print 'W_conv{}'.format(i), np.min(w), np.mean(w), np.max(w)
            b = np.abs(np.copy(hdf5['b_conv{}'.format(i)]))
            print 'b_conv{}'.format(i), np.min(b), np.mean(b), np.max(b)

    return params
Project: latplan    Author: guicho271828
def successors (config):
    import math
    leds = len(config)
    size = int(math.sqrt(leds))
    succs = []
    for i in range(leds):
        y = i // size
        x = i % size
        succ = np.copy(config)
        succ[i] *= -1
        if x-1 >= 0:
            succ[i-1] *= -1
        if x+1 < size:
            succ[i+1] *= -1
        if y-1 >= 0:
            succ[i-size] *= -1
        if y+1 < size:
            succ[i+size] *= -1
        succs.append(succ)
    return succs
Project: latplan    Author: guicho271828
def run(ae,xs):
    zs = ae.encode_binary(xs)
    ys = ae.decode_binary(zs)
    mod_ys = []
    correlations = []
    print(ys.shape)
    print("corrlations:")
    print("bit \ image  {}".format(range(len(xs))))
    for i in range(ae.N):
        mod_zs = np.copy(zs)
        # increase the latent value from 0 to 1 and check the difference
        for j in range(11):
            mod_zs[:,i] = j / 10.0
            mod_ys.append(ae.decode_binary(mod_zs))
        zero_zs,one_zs = np.copy(zs),np.copy(zs)
        zero_zs[:,i] = 0.
        one_zs[:,i] = 1.
        correlation = np.mean(np.square(ae.decode_binary(zero_zs) - ae.decode_binary(one_zs)),
                              axis=(1,2))
        correlations.append(correlation)
        print("{:>5} {}".format(i,correlation))
    plot_grid2(np.einsum("ib...->bi...",np.array(mod_ys)).reshape((-1,)+ys.shape[1:]),
               w=11,path=ae.local("dump_significance.png"))
    return np.einsum("ib->bi",correlations)
Project: latplan    Author: guicho271828
def prepare(data):
    num = len(data)
    dim = data.shape[1]//2
    print("in prepare: ",data.shape,num,dim)
    pre, suc = data[:,:dim], data[:,dim:]

    suc_invalid = np.copy(suc)
    random.shuffle(suc_invalid)

    diff_valid   = suc         - pre
    diff_invalid = suc_invalid - pre

    inputs = np.concatenate((diff_valid,diff_invalid),axis=0)
    outputs = np.concatenate((np.ones((num,1)),np.zeros((num,1))),axis=0)
    print("in prepare: ",inputs.shape,outputs.shape)
    io = np.concatenate((inputs,outputs),axis=1)
    random.shuffle(io)

    train_n = int(2*num*0.9)
    train, test = io[:train_n], io[train_n:]
    train_in, train_out = train[:,:dim], train[:,dim:]
    test_in, test_out = test[:,:dim], test[:,dim:]
    print("in prepare: ",train_in.shape, train_out.shape, test_in.shape, test_out.shape)

    return train_in, train_out, test_in, test_out
Project: Modeling-Cloth    Author: the3dadvantage
def get_poly_centers(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=np.bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    p_count = len(mesh.polygons)
    center = np.zeros(p_count * 3)#, dtype=type)
    mesh.polygons.foreach_get('center', center)
    center.shape = (p_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)

    return center
Project: Modeling-Cloth    Author: the3dadvantage
def get_poly_normals(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=np.bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    p_count = len(mesh.polygons)
    normal = np.zeros(p_count * 3)#, dtype=type)
    mesh.polygons.foreach_get('normal', normal)
    normal.shape = (p_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)

    return normal
Project: Modeling-Cloth    Author: the3dadvantage
def get_v_normals(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=np.bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    v_count = len(mesh.vertices)
    normal = np.zeros(v_count * 3)#, dtype=type)
    mesh.vertices.foreach_get('normal', normal)
    normal.shape = (v_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)

    return normal
Project: Modeling-Cloth    Author: the3dadvantage
def execute(self, context):
        ob = bpy.context.object
        bpy.ops.object.mode_set(mode='OBJECT')
        sel = [i.index for i in ob.data.vertices if i.select]

        name = ob.name
        matrix = ob.matrix_world.copy()
        for v in sel:    
            e = bpy.data.objects.new('modeling_cloth_pin', None)
            bpy.context.scene.objects.link(e)
            if ob.active_shape_key is None:    
                closest = matrix * ob.data.vertices[v].co# * matrix
            else:
                closest = matrix * ob.active_shape_key.data[v].co# * matrix
            e.location = closest #* matrix
            e.show_x_ray = True
            e.select = True
            e.empty_draw_size = .1
            data[name].pin_list.append(v)
            data[name].hook_list.append(e)            
            ob.select = False
        bpy.ops.object.mode_set(mode='EDIT')       

        return {'FINISHED'}
Project: SelfDrivingCar    Author: aguijarro
def process_image(image):
    # printing out some stats and plotting
    print('This image is:', type(image), 'with dimensions:', image.shape)
    gray = grayscale(image)
    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # plt.imshow(blur_gray, cmap='gray')

    # Define our parameters for Canny and apply
    low_threshold = 45 #50
    high_threshold = 150 #150
    edges = canny(blur_gray, low_threshold, high_threshold)

    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    #vertices = np.array([[(0,imshape[0]),(475, 310), (475, 310), (imshape[1],imshape[0])]], dtype=np.int32)
    vertices = np.array([[(0,imshape[0]),(450, 330), (490, 310), (imshape[1],imshape[0])]], dtype=np.int32)    
    masked_edges = region_of_interest(edges, vertices)

    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 1 # distance resolution in pixels of the Hough grid
    theta = np.pi/180 # angular resolution in radians of the Hough grid
    threshold = 15    # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 40 #minimum number of pixels making up a line 150 - 40
    max_line_gap = 130 # maximum gap in pixels between connectable line segments 58 -95
    line_image = np.copy(image)*0 # creating a blank to draw lines on

    lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)
    # Draw the lines on the edge image
    lines_edges = weighted_img(lines, image)
    return lines_edges
Project: PyFunt    Author: dnlcrl
def _reset(self):
        '''
        Set up some book-keeping variables for optimization. Don't call this
        manually.
        '''
        # Set up some variables for book-keeping
        self.epoch = 0
        self.best_val_acc = 0
        self.best_params = {}
        self.loss_history = []
        self.val_acc_history = []
        self.train_acc_history = []
        self.pbar = None

        # Make a deep copy of the optim_config for each parameter
        self.optim_configs = {}
        self.params, self.grad_params = self.model.get_parameters()
        # self.weights, _ = self.model.get_parameters()
        for p in range(len(self.params)):
            d = {k: v for k, v in self.optim_config.iteritems()}
            self.optim_configs[p] = d

        self.multiprocessing = bool(self.num_processes-1)
        if self.multiprocessing:
            self.pool = mp.Pool(self.num_processes, init_worker)
Project: PyFunt    Author: dnlcrl
def eval_numerical_gradient_array(f, x, df, h=1e-5):
    '''
    Evaluate a numeric gradient for a function that accepts a numpy
    array and returns a numpy array.
    '''
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index

        oldval = x[ix]
        x[ix] = oldval + h
        pos = f(x).copy()
        x[ix] = oldval - h
        neg = f(x).copy()
        x[ix] = oldval

        grad[ix] = np.sum((pos - neg) * df) / (2 * h)
        it.iternext()
    return grad
Project: radar    Author: amoose136
def test_order(self):
        # It turns out that people rely on np.copy() preserving order by
        # default; changing this broke scikit-learn:
        #   https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783
        a = np.array([[1, 2], [3, 4]])
        assert_(a.flags.c_contiguous)
        assert_(not a.flags.f_contiguous)
        a_fort = np.array([[1, 2], [3, 4]], order="F")
        assert_(not a_fort.flags.c_contiguous)
        assert_(a_fort.flags.f_contiguous)
        a_copy = np.copy(a)
        assert_(a_copy.flags.c_contiguous)
        assert_(not a_copy.flags.f_contiguous)
        a_fort_copy = np.copy(a_fort)
        assert_(not a_fort_copy.flags.c_contiguous)
        assert_(a_fort_copy.flags.f_contiguous)
Project: radar    Author: amoose136
def test_axis_keyword(self):
        a3 = np.array([[2, 3],
                       [0, 1],
                       [6, 7],
                       [4, 5]])
        for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
            orig = a.copy()
            np.median(a, axis=None)
            for ax in range(a.ndim):
                np.median(a, axis=ax)
            assert_array_equal(a, orig)

        assert_allclose(np.median(a3, axis=0), [3,  4])
        assert_allclose(np.median(a3.T, axis=1), [3,  4])
        assert_allclose(np.median(a3), 3.5)
        assert_allclose(np.median(a3, axis=None), 3.5)
        assert_allclose(np.median(a3.T), 3.5)
Project: radar    Author: amoose136
def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if not args:
            _res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)

            # Convert args to object arrays first
            inputs = [array(_a, copy=False, subok=True, dtype=object)
                      for _a in args]

            outputs = ufunc(*inputs)

            if ufunc.nout == 1:
                _res = array(outputs,
                             copy=False, subok=True, dtype=otypes[0])
            else:
                _res = tuple([array(_x, copy=False, subok=True, dtype=_t)
                              for _x, _t in zip(outputs, otypes)])
        return _res
Project: gtzan.keras    Author: Hguimaraes
def fit(self, x, augment=False, rounds=1, seed=None):
    x = np.asarray(x, dtype=K.floatx())

    if x.ndim != 2:
      raise ValueError('Input to `.fit()` should have rank 2. '
        'Got array with shape: ' + str(x.shape))

    if seed is not None:
      np.random.seed(seed)

    x = np.copy(x)
    if augment:
      ax = np.zeros(tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
      for r in range(rounds):
        for i in range(x.shape[0]):
          ax[i + r * x.shape[0]] = self.random_transform(x[i])
      x = ax

# @Class: Iterator
# @Description:
#   Abstract base class for Music data iterators.
#   n: Integer, total number of samples in the dataset to loop over.
#   batch_size: Integer, size of a batch.
#   shuffle: Boolean, whether to shuffle the data between epochs.
#   seed: Random seeding for data shuffling.
Project: AutoML5    Author: djajetic
def log_loss(solution, prediction, task = 'binary.classification'):
    ''' Log loss for binary and multiclass. '''
    [sample_num, label_num] = solution.shape
    eps = 1e-15

    pred = np.copy(prediction) # beware: changes in prediction occur through this
    sol = np.copy(solution)
    if (task == 'multiclass.classification') and (label_num>1):
        # Make sure the lines add up to one for multi-class classification
        norma = np.sum(prediction, axis=1)
        for k in range(sample_num):
            pred[k,:] /= sp.maximum (norma[k], eps) 
        # Make sure there is a single label active per line for multi-class classification
        sol = binarize_predictions(solution, task='multiclass.classification')
        # For the base prediction, this solution is ridiculous in the multi-label case

    # Bounding of predictions to avoid log(0),1/0,...
    pred = sp.minimum (1-eps, sp.maximum (eps, pred))
    # Compute the log loss    
    pos_class_log_loss = - mvmean(sol*np.log(pred), axis=0)
    if (task != 'multiclass.classification') or (label_num==1):
        # The multi-label case is a bunch of binary problems.
        # The second class is the negative class for each column.
        neg_class_log_loss = - mvmean((1-sol)*np.log(1-pred), axis=0)
        log_loss = pos_class_log_loss + neg_class_log_loss
        # Each column is an independent problem, so we average.
        # The probabilities in one line do not add up to one.
        # log_loss = mvmean(log_loss) 
        # print('binary {}'.format(log_loss))
        # In the multilabel case, the right thing is to AVERAGE, not sum
        # We return all the scores so we can normalize correctly later on
    else:
        # For the multiclass case the probabilities in one line add up one.
        log_loss = pos_class_log_loss
        # We sum the contributions of the columns.
        log_loss = np.sum(log_loss) 
        #print('multiclass {}'.format(log_loss))
    return log_loss
Project: onto-lstm    Author: pdasigi
def test(self, vocab_size, use_onto_lstm, S_ind_test=None, C_ind_test=None, hierarchical=False, base=2, oov_list=None):
    X_test = C_ind_test[:,:-1] if use_onto_lstm else S_ind_test[:,:-1] # remove the last words' hyps in all sentences
    Y_inds_test = S_ind_test[:,1:]
    if hierarchical:
      test_targets = self._factor_target_indices(Y_inds_test, vocab_size, base=base)
    else:
      test_targets = [self._make_one_hot(Y_inds_test, vocab_size)]
    print >>sys.stderr, "Evaluating model on test data"
    test_loss = self.model.evaluate(X_test, test_targets)
    print >>sys.stderr, "Test loss: %.4f"%test_loss
    if oov_list is not None:
      oov_inds = [self.dp.word_index[w] for w in oov_list]
      non_oov_Y_inds = numpy.copy(Y_inds_test)
      for ind in oov_inds:
        non_oov_Y_inds[non_oov_Y_inds == ind] = 0
      non_oov_test_targets = self._factor_target_indices(non_oov_Y_inds, vocab_size, base=base)
      non_oov_test_loss = self.model.evaluate(X_test, non_oov_test_targets)
      print >>sys.stderr, "Non-oov test loss: %.4f"%non_oov_test_loss
    factored_test_preds = [-((numpy.log(pred) * target).sum(axis=-1)) for pred, target in zip(self.model.predict(X_test), test_targets)]
    test_preds = sum(factored_test_preds)
    #non_null_probs = []
    #for test_pred, inds in zip(test_preds, Y_inds_test):
    #  wanted_probs = []
    #  for tp, ind in zip(test_pred, inds):
    #    if ind != 0:
    #      wanted_probs.append(tp)
    #  non_null_probs.append(wanted_probs)
    #return non_null_probs
    return test_preds
Project: uwb_tracker_ros    Author: eth-ait
def update_filter(self, timestep, estimate, ranges):
        """Update position filter.

        Args:
             timestep (float): Time elapsed since last update.
             estimate (StateEstimate): Position estimate to update.
             ranges (list of floats): Range measurements.

        Returns:
            new_estimate (StateEstimate): Updated position estimate.
            outlier_flag (bool): Flag indicating whether the measurement was rejected as an outlier.
        """
        num_of_units = len(ranges)
        x = estimate.state
        P = estimate.covariance
        # Compute process matrix and covariance matrices
        F, Q, R = self._compute_process_and_covariance_matrices(timestep)
        # rospy.logdebug('F: {}'.format(F))
        # rospy.logdebug('Q: {}'.format(Q))
        # rospy.logdebug('R: {}'.format(R))
        # Prediction
        x = np.dot(F, x)
        P = np.dot(F, np.dot(P, F.T)) + Q
        # Update
        n = np.copy(x)
        H = np.zeros((num_of_units, x.size))
        z = np.zeros((num_of_units, 1))
        h = np.zeros((num_of_units, 1))
        for i in xrange(self.ikf_iterations):
            n, K, outlier_flag = self._ikf_iteration(x, n, ranges, h, H, z, estimate, R)
        if outlier_flag:
            new_estimate = estimate
        else:
            new_state = n
            new_covariance = np.dot((np.eye(6) - np.dot(K, H)), P)
            new_estimate = UWBTracker.StateEstimate(new_state, new_covariance)
        return new_estimate, outlier_flag
Project: kaggle_dsb2017    Author: astoc
def augmentation(scans,masks,n):
    datagen = ImageDataGenerator(
        featurewise_center=False,   
        samplewise_center=False,  
        featurewise_std_normalization=False,  
        samplewise_std_normalization=False,  
        zca_whitening=False,  
        rotation_range=25,   # was 25
        width_shift_range=0.3,  # ws 0.3; was 0.1# tried 0.01
        height_shift_range=0.3,   # was 0.3; was 0.1 # tried 0.01
        horizontal_flip=True,   
        vertical_flip=True,  
        zoom_range=False)
    i=0
    scans_g=scans.copy()
    for batch in datagen.flow(scans, batch_size=1, seed=1000): 
        scans_g=np.vstack([scans_g,batch])
        i += 1
        if i > n:
            break
    i=0
    masks_g=masks.copy()
    for batch in datagen.flow(masks, batch_size=1, seed=1000): 
        masks_g=np.vstack([masks_g,batch])
        i += 1
        if i > n:
            break
    return((scans_g,masks_g))
Project: human-rl    Author: gsastry
def add_noise(self, labels, seed_str):
        if self.false_positive_rate > 0 or self.false_negative_rate > 0:
            r = np.random.RandomState()
            b = bytes(",".join(
                [seed_str, str(self.false_positive_rate),
                 str(self.false_negative_rate)]), 'ascii')
            r.seed(list(hashlib.md5(b).digest()))
            p = r.rand(len(labels))
            original_labels = np.copy(labels)
            labels[(~original_labels) & (p < self.false_positive_rate)] = True
            labels[(original_labels) & (p < self.false_negative_rate)] = False
        return labels

    ## currently unused, randomizes *noise_prob* of the labels
Project: cellranger    Author: 10XGenomics
def add_many(self, elems):
        self.active = True
        elems = np.copy(elems).astype(np.int_)
        elems[elems > self.max_value] = 1 + self.max_value
        self.counts += np.bincount(elems, minlength=len(self.counts))