Python numpy module, append() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.append().
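Before the project examples, here is a minimal stand-alone sketch of numpy.append() itself (illustrative values, not taken from any project below): the function always returns a new array, flattens its inputs when no axis is given, and requires matching dimensionality when an axis is specified.

import numpy as np

a = np.array([1, 2, 3])
print(np.append(a, [4, 5]))            # [1 2 3 4 5] -- without axis, inputs are flattened first
print(a)                               # [1 2 3]     -- the original is untouched; np.append returns a copy

m = np.array([[1, 2], [3, 4]])
print(np.append(m, [[5, 6]], axis=0))  # [[1 2] [3 4] [5 6]] -- with axis=0, the appended block must also be 2-D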

Project: MultiObjectTracker    Author: alokwhitewolf    | Project source | File source
def get_points(event,x,y,flags,param):
    global lpnts,rpnts

    if event == cv2.EVENT_LBUTTONDOWN:
        lpnts = np.append(lpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [lpnts], False, (0, 0, 255))



    if event == cv2.EVENT_RBUTTONDOWN:
        rpnts = np.append(rpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [rpnts], False, (255, 0, 0))

        if rpnts.size>2:
            check(lpnts, rpnts[-1], rpnts[-2])



#check if the new point crosses a line
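The handler above grows the global point arrays with axis=0 appends, but this excerpt does not show how lpnts and rpnts are initialized. For that pattern to work they must already be 2-D arrays with two columns; a minimal sketch under that assumption (the starting shape is a guess, not project code):

import numpy as np

lpnts = np.empty((0, 2), dtype=int)                      # assumed initialization: zero rows, two columns
lpnts = np.append(lpnts, np.array([[10, 20]]), axis=0)
lpnts = np.append(lpnts, np.array([[30, 40]]), axis=0)
print(lpnts)         # [[10 20] [30 40]]
print(lpnts.size)    # 4 -- .size counts elements, so the size > 2 test above means "more than one point"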
Project: MultiObjectTracker    Author: alokwhitewolf    | Project source | File source
def get_points(event, x, y, flags, param):
    global lpnts, mode, counter, which_intersect

    if event == cv2.EVENT_LBUTTONDOWN:
        lpnts = np.append(lpnts, np.array([[x, y]]), axis=0)
        cv2.polylines(img, [lpnts], False, (0, 0, 255))
        if lpnts.size > 2:
            if mode == 0:

                #check(l1, lpnts[-1], lpnts[-2])
                if check(l1, lpnts[-1], lpnts[-2]):
                    which_intersect = 0
                    mode = 1
                #check(l2, lpnts[-1], lpnts[-2])
                if check(l2, lpnts[-1], lpnts[-2]):
                    which_intersect = 1
                    mode = 1

            elif mode == 1:

                counter += 1
                if check(lines[(which_intersect + 1) % 2], lpnts[-1], lpnts[-2]):
                    mode = 3
                    print counter


# check if the new point crosses a line
Project: RasterFairy    Author: Quasimondo    | Project source | File source
def rectifyCloud(xyc,autoPerimeterOffset=True,autoPerimeterDensity=True,
                 width=64, height=64, 
                 perimeterSubdivisionSteps=4, paddingScale=1.05, 
                 smoothing=0.001, warpQuality=9, perimeterOffset=None ):

    sourceGridPoints = getCloudGrid( xyc,autoPerimeterOffset=autoPerimeterOffset,autoPerimeterDensity=autoPerimeterDensity,
                 width=width, height=height, 
                 perimeterSubdivisionSteps=perimeterSubdivisionSteps, paddingScale=paddingScale, 
                 smoothing=smoothing, warpQuality=warpQuality, perimeterOffset=perimeterOffset)

    targetGridPoints = []
    for yi in range(height):
        for xi in range(width):
            targetGridPoints.append([xi,yi])

    return warpCloud( xyc, sourceGridPoints, targetGridPoints, warpQuality=warpQuality )
Project: cellranger    Author: 10XGenomics    | Project source | File source
def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
    mat_disp = (mat_var - mat_mean) / np.square(mat_mean)

    quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
    quantiles = np.append(quantiles, mat_mean.max())

    # merge bins with no difference in value
    quantiles = np.unique(quantiles)

    if len(quantiles) <= 1:
        # pathological case: the means are all identical. just return raw dispersion.
        return mat_disp

    # calc median dispersion per bin
    (disp_meds, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, mat_disp, statistic='median', bins=quantiles)

    # calc median absolute deviation of dispersion per bin
    disp_meds_arr = disp_meds[disp_bins-1] # 0th bin is empty since our quantiles start from 0
    disp_abs_dev = abs(mat_disp - disp_meds_arr)
    (disp_mads, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, disp_abs_dev, statistic='median', bins=quantiles)

    # calculate normalized dispersion
    disp_mads_arr = disp_mads[disp_bins-1]
    disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
    return disp_norm
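The helper above builds bin edges from percentiles and then uses np.append() to attach the maximum, so the top bin is closed. A toy illustration of just that edge-building step (the values are made up):

import numpy as np

mat_mean = np.array([0.1, 0.5, 1.0, 2.0, 4.0, 8.0])
nbins = 3
quantiles = np.percentile(mat_mean, np.arange(0, 100, 100.0 / nbins))  # lower edges only
quantiles = np.append(quantiles, mat_mean.max())                       # close the last bin at the maximum
print(quantiles)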
Project: BioNanoAnalyst    Author: AppliedBioinformatics    | Project source | File source
def parse_fasta(self):
        self.ref_id=dict()
        self.ref_inf=dict()
        i=1
        N = 0
        ref_inf=np.empty(shape=[0,3])
        for seqs in SeqIO.parse(self.ref,'fasta'):
            seq_id = seqs.id
            self.ref_id[i] = seq_id
            seq = str(seqs.seq.upper())
            seq_len = len(seq)
            self.ref_inf[seq_id]=seq_len
            N+=seq.count('N')
            ref_inf = np.append(ref_inf,[[i,seq_id,seq_len]],axis=0)
            i+=1
        self.ref_detail = pd.DataFrame(ref_inf,columns=['Index','Contig','Length(bp)'])
        self.N = N
Project: BioNanoAnalyst    Author: AppliedBioinformatics    | Project source | File source
def qualification_filter(self):
        """
        Provide information on the unqualified and qualified contigs from the original fasta file,
        using the criterion: >20Kb and >=5 restriction sites inside.
        """
        unqualified = np.empty(shape=[0,3])
        qualified = np.empty(shape=[0,4])
        rm_dup = self.RcmapTable[['CMapId','ContigLength','NumSites']].drop_duplicates()
        for i in self.ref_id.keys():
            index = i
            name = self.ref_id[i]
            length = self.ref_inf[name]
            if i not in self.RcmapTable['CMapId'].unique():
                unqualified = np.append(unqualified,[[index,name, length]],axis=0)
            else:
                Id = rm_dup[rm_dup['CMapId']==i].index[0]
                sites = rm_dup['NumSites'][Id]
                qualified = np.append(qualified,[[index,name,length,sites]],axis=0)
        self.unqualified = pd.DataFrame(unqualified, columns=['index','contig','length(bp)'])
        self.qualified = pd.DataFrame(qualified, columns=['index','contig','length(bp)','numSites'])
Project: simple_rl    Author: david-abel    | Project source | File source
def _pad_features_with_zeros(self, state, action):
        '''
        Args:
            state: the state whose features will be padded with zeros
            action: the action whose index is appended to the padded features

        Returns:
            (numpy.ndarray): shape (1, self.max_state_features + 1)
        '''
        features = state.features()
        while len(features) < self.max_state_features:
            features = np.append(features, 0)

        # Reshape per update to cluster regression in sklearn 0.17.
        reshaped_features = np.append(features, [self.actions.index(action)])
        reshaped_features = reshaped_features.reshape(1, -1)

        return reshaped_features
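Calling np.append() inside the while loop above reallocates the feature array on every iteration. When the amount of padding is known up front, a single call gives the same result; a small sketch of that alternative (the example values are assumptions, not simple_rl data):

import numpy as np

features = np.array([0.3, 0.7])                    # assumed short feature vector
max_state_features = 5
padded = np.append(features, np.zeros(max_state_features - len(features)))
print(padded)                                      # [0.3 0.7 0.  0.  0. ]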
Project: Sound-classification-on-Raspberry-Pi-with-Tensorflow    Author: GianlucaPaolocci    | Project source | File source
def parse_audio_files(parent_dir,sub_dirs,file_ext='*.wav'):
    ignored = 0
    features, labels, name = np.empty((0,161)), np.empty(0), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        print sub_dir
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            try:
                mfccs, chroma, mel, contrast, tonnetz = extract_features(fn)
                ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
                features = np.vstack([features,ext_features])
                l = [fn.split('-')[1]] * (mfccs.shape[0])
                labels = np.append(labels, l)
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                ignored += 1
    print "Ignored files: ", ignored
    return np.array(features), np.array(labels, dtype = np.int)
Project: ananke    Author: beiko-lab    | Project source | File source
def fill_array(self, target, value, chunk_size = 1000):
        """Fill the target HDF5 array with a single value. Useful for 
        initializing an array, since the rhdf5 package tends to segfault if you
        load an uninitialized data set.

        Parameters
        ----------
        target: str
            the location of the HDF5 array, e.g., "samples/time"
        value: any
            the value to fill the array with
        chunk_size: int
            the number of items to insert at a time. This only needs to be
            increased for very large data sets.
        """
        n = self.h5_table[target].shape[0]
        chunks = np.append(np.arange(0, n, chunk_size), n)
        for i in range(len(chunks)-1):
            self.h5_table[target][chunks[i]:chunks[i+1]] = (
                                            [value]*(chunks[i+1] - chunks[i]) )
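The chunk boundaries come from appending n to an arange, which guarantees that the final, possibly partial chunk is covered. The expression in isolation (numbers chosen for illustration):

import numpy as np

n, chunk_size = 2500, 1000
chunks = np.append(np.arange(0, n, chunk_size), n)
print(chunks)                                      # [   0 1000 2000 2500]
for start, stop in zip(chunks[:-1], chunks[1:]):
    print(start, stop)                             # chunks [0, 1000), [1000, 2000), [2000, 2500)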
Project: search-MjoLniR    Author: wikimedia    | Project source | File source
def append_features(df, *cols):
    """Append features from columns to the features vector.

    Parameters
    ----------
    df : pyspark.sql.DataFrame
    cols : list of str

    Returns
    -------
    pyspark.sql.DataFrame
    """
    def add_features(feat, *other):
        raw = feat.toArray()
        return Vectors.dense(np.append(raw, map(float, other)))
    add_features_udf = F.udf(add_features, VectorUDT())
    new_feat_list = df.schema['features'].metadata['features'] + cols
    return df.withColumn('features', mjolnir.spark.add_meta(
        df._sc, add_features_udf('features', *cols), {'features': new_feat_list}))
Project: tfutils    Author: neuroailab    | Project source | File source
def init_ops(self):
        self.input_ops = []
        for thread_num in range(self.n_threads):
            op = {}
            for attr_num in range(self.n_attrs):
                fq = self.file_queues[thread_num][attr_num]
                args = self.read_args[attr_num]
                kwargs = self.read_kwargs[attr_num]
                _op = self.get_input_op(fq, *args, **kwargs)
                if self.trans_dicts and self.trans_dicts[attr_num]:
                    td = self.trans_dicts[attr_num]
                    for k in td:
                        if k in _op:
                            _op[td[k]] = _op.pop(k)
                op.update(_op)
            self.input_ops.append(op)
        self.apply_postprocessing()
        return self.input_ops
Project: tfutils    Author: neuroailab    | Project source | File source
def get_data_paths(paths, file_pattern=DEFAULT_TFRECORDS_GLOB_PATTERN):
    if not isinstance(paths, list):
        assert isstring(paths)
        paths = [paths]
    if not isinstance(file_pattern, list):
        assert isstring(file_pattern)
        file_patterns = [file_pattern] * len(paths)
    else:
        file_patterns = file_pattern
    assert len(file_patterns) == len(paths), (file_patterns, paths)
    datasources = []
    for path, file_pattern in zip(paths, file_patterns):
        if os.path.isdir(path):
            tfrecord_pattern = os.path.join(path, file_pattern)
            datasource = tf.gfile.Glob(tfrecord_pattern)
            datasource.sort()
            datasources.append(datasource)
        else:
            datasources.append([path])
    dl = map(len, datasources)
    assert all([dl[0] == d for d in dl[1:]]), dl
    return datasources
Project: tfutils    Author: neuroailab    | Project source | File source
def parse_standard_tfmeta(paths):
    meta_list = []
    for path in paths:
        if isstring(path):
            if path.startswith('meta') and path.endswith('.pkl'):
                mpaths = [path]
            else:
                assert os.path.isdir(path)
                mpaths = filter(lambda x: x.startswith('meta') and x.endswith('.pkl'),
                                os.listdir(path))
                mpaths = [os.path.join(path, mp) for mp in mpaths]
        else:
            # in this case, it's a list
            assert isinstance(path, list)
            mpaths = path
        d = {}
        for mpath in mpaths:
            d.update(cPickle.load(open(mpath)))
        meta_list.append(d)
    return meta_list
Project: DmsMsgRcg    Author: bshao001    | Project source | File source
def train_tas(model, model_scope, num_epoches, result_file):
    height, width = FEATURE_HEIGHT, FEATURE_WIDTH

    feats0, feats1 = read_features_tas(height, width)

    y0 = np.zeros((feats0.shape[0], 1), dtype=np.float32)
    y1 = np.ones((feats1.shape[0], 1), dtype=np.float32)

    all_feats = np.append(feats0, feats1, axis=0)
    all_y = np.append(y0, y1, axis=0)

    print("all_feats shapes: toll = {}, closed = {}, all = {}; "
          "and dtype = {}".format(feats0.shape, feats1.shape, all_feats.shape, all_feats.dtype))
    print("all_y shape: {}; and dtype={}".format(all_y.shape, all_y.dtype))

    res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
    img_cnn = ImgConvNets(model, model_scope, height, width, class_count=2, keep_prob=0.5,
                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, num_epoches=num_epoches)

    img_cnn.train(all_feats, all_y, res_dir, result_file=result_file)
Project: DmsMsgRcg    Author: bshao001    | Project source | File source
def train_lss(model, model_scope, num_epoches, result_file):
    height, width = FEATURE_HEIGHT, FEATURE_WIDTH

    feats0, feats1, feats2, feats3 = read_features_lss(height, width)

    y0 = np.zeros((feats0.shape[0], 1), dtype=np.float32)
    y1 = np.ones((feats1.shape[0], 1), dtype=np.float32)
    y2 = np.ones((feats2.shape[0], 1), dtype=np.float32) * 2
    y3 = np.ones((feats3.shape[0], 1), dtype=np.float32) * 3

    all_feats = np.append(np.append(np.append(feats0, feats1, axis=0), feats2, axis=0),
                          feats3, axis=0)
    all_y = np.append(np.append(np.append(y0, y1, axis=0), y2, axis=0), y3, axis=0)

    print("all_feats shapes: zero toll = {}, closed = {}, normal = {}, congested = {},  all = {}; "
          "and dtype = {}".format(feats0.shape, feats1.shape, feats2.shape, feats3.shape,
                                  all_feats.shape, all_feats.dtype))
    print("all_y shape: {}; and dtype={}".format(all_y.shape, all_y.dtype))

    res_dir = os.path.join(PROJECT_ROOT, 'Data', 'Result')
    img_cnn = ImgConvNets(model, model_scope, height, width, class_count=4, keep_prob=0.5,
                          batch_size=32, learning_rate=1e-4, lr_adaptive=True, num_epoches=num_epoches)

    img_cnn.train(all_feats, all_y, res_dir, result_file=result_file)
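The nested np.append(..., axis=0) calls above stack four feature blocks row-wise; np.concatenate does the same job in a single call. A sketch with dummy arrays showing the equivalence:

import numpy as np

feats0, feats1 = np.zeros((2, 3)), np.ones((2, 3))
feats2, feats3 = np.full((2, 3), 2.0), np.full((2, 3), 3.0)

via_append = np.append(np.append(np.append(feats0, feats1, axis=0), feats2, axis=0),
                       feats3, axis=0)
via_concatenate = np.concatenate([feats0, feats1, feats2, feats3], axis=0)
print(np.array_equal(via_append, via_concatenate))   # True -- same (8, 3) result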
Project: rain-metrics-python    Author: apendergrass    | Project source | File source
def makedists(pdata,binl):
    ##### This is called from within makeraindist.
    ##### Calculate distributions
    pds=pdata.shape;    nlat=pds[1];    nlon=pds[0];    nd=pds[2]
    bins=np.append(0,binl)
    n=np.empty((nlon,nlat,len(binl)))
    binno=np.empty(pdata.shape)
    for ilon in range(nlon):
        for ilat in range(nlat):
            # this is the histogram - we'll get frequency from this
            thisn,thisbin=np.histogram(pdata[ilon,ilat,:],bins) 
            n[ilon,ilat,:]=thisn
            # these are the bin locations. we'll use these for the amount dist
            binno[ilon,ilat,:]=np.digitize(pdata[ilon,ilat,:],bins) 
    #### Calculate the number of days with non-missing data, for normalization
    ndmat=np.tile(np.expand_dims(np.nansum(n,axis=2),axis=2),(1,1,len(bins)-1))
    thisppdfmap=n/ndmat
    #### Iterate back over the bins and add up all the precip - this will be the rain amount distribution
    testpamtmap=np.empty(thisppdfmap.shape)
    for ibin in range(len(bins)-1):
        testpamtmap[:,:,ibin]=(pdata*(ibin==binno)).sum(axis=2)
    thispamtmap=testpamtmap/ndmat
    return thisppdfmap,thispamtmap
Project: rain-metrics-python    Author: apendergrass    | Project source | File source
def makedists(pdata,binl):
    ##### This is called from within makeraindist.
    ##### Calculate distributions
    pds=pdata.shape;    nlat=pds[1];    nlon=pds[0];    nd=pds[2]
    bins=np.append(0,binl)
    n=np.empty((nlon,nlat,len(binl)))
    binno=np.empty(pdata.shape)
    for ilon in range(nlon):
        for ilat in range(nlat):
            # this is the histogram - we'll get frequency from this
            thisn,thisbin=np.histogram(pdata[ilon,ilat,:],bins) 
            n[ilon,ilat,:]=thisn
            # these are the bin locations. we'll use these for the amount dist
            binno[ilon,ilat,:]=np.digitize(pdata[ilon,ilat,:],bins) 
    #### Calculate the number of days with non-missing data, for normalization
    ndmat=np.tile(np.expand_dims(np.nansum(n,axis=2),axis=2),(1,1,len(bins)-1))
    thisppdfmap=n/ndmat
    #### Iterate back over the bins and add up all the precip - this will be the rain amount distribution
    testpamtmap=np.empty(thisppdfmap.shape)
    for ibin in range(len(bins)-1):
        testpamtmap[:,:,ibin]=(pdata*(ibin==binno)).sum(axis=2)
    thispamtmap=testpamtmap/ndmat
    return thisppdfmap,thispamtmap
Project: autolab_core    Author: BerkeleyAutomation    | Project source | File source
def linear_trajectory_to(self, target_tf, traj_len):
        """Creates a trajectory of poses linearly interpolated from this tf to a target tf.

        Parameters
        ----------
        target_tf : :obj:`RigidTransform`
            The RigidTransform to interpolate to.
        traj_len : int
            The number of RigidTransforms in the returned trajectory.

        Returns
        -------
        :obj:`list` of :obj:`RigidTransform`
            A list of interpolated transforms from this transform to the target.
        """
        if traj_len < 0:
            raise ValueError('Traj len must be at least 0')
        delta_t = 1.0 / (traj_len + 1)
        t = 0.0
        traj = []
        while t < 1.0:
            traj.append(self.interpolate_with(target_tf, t))
            t += delta_t
        traj.append(target_tf)
        return traj
Project: inqbus.rainflow    Author: Inqbus    | Project source | File source
def get_extrema(data):
    # find extrema by finding indexes where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)

    # the first and last values are always local extrema
    signchange[0] = 1

    # the last value is missing because the diff array is one value shorter than
    # the input array, so we have to add it back
    signchange = np.append(signchange, np.array([1]))

    calc_data = data[np.where(signchange != 0)]

    return calc_data
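np.diff() shortens the data by one element, so the sign-change mask has to be padded back with np.append() before it can index the original array. A compact illustration of that bookkeeping (made-up data):

import numpy as np

data = np.array([1.0, 3.0, 2.0, 2.5, 0.5])
asign = np.sign(np.diff(data))                    # one element shorter than data
signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)
signchange[0] = 1                                 # the first value is always kept
signchange = np.append(signchange, 1)             # pad back to len(data) so the last value is kept too
print(data[signchange != 0])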
Project: inqbus.rainflow    Author: Inqbus    | Project source | File source
def count_pairs(data):
    df = pd.DataFrame(data)

    start, target = df.columns.tolist()

    # first we create a group for each pair and take the size of each group as its count.
    # counts is a pandas.Series with the pairs as index
    counts = df.groupby([start, target]).size()

    # then we remove duplicate pairs from the original dataframe,
    # so the dataframe and counts are equal in length
    df = df.drop_duplicates()

    # reset index to values of pairs to fit index of counts
    df.set_index([0, 1], inplace=True, drop=False)

    # now we append the counts as a column to the original data
    df[2] = pd.Series(counts.values, index=counts.index)

    # finally, cast the pandas dataframe back to a numpy 2-D array usable for the
    # following steps
    array = df.values
    return array
Project: multimodal_varinf    Author: tmoer    | Project source | File source
def kl_train(z,prior,posterior,hps):
    # push prior through AR layer
    logqs = posterior.logps(z)
    if hps.n_flow > 0:
        nice_layers = []
        print('Does this print')
        for i in range(hps.n_flow):
            nice_layers.append(nice_layer(tf.shape(z),hps,'nice{}'.format(i),ar=hps.ar))

        for i,layer in enumerate(nice_layers):
            z,log_det = layer.forward(z)
            logqs += log_det

    # track the KL divergence after transformation     
    logps = prior.logps(z)
    kl = logqs - logps
    return z, kl

### Autoregressive layers
Project: multimodal_varinf    Author: tmoer    | Project source | File source
def forward(self,z):
        if not self.ar:
            mu,log_sigma = self._get_mu_and_sigma(z)
        else:
            # permute z
            z = tf.reshape(z,[-1]+[1]*self.hps.z_size)
            perm = np.random.permutation(self.hps.z_size)+1
            z = tf.transpose(z,np.append([0],perm))
            z = tf.reshape(z,[-1,self.hps.z_size])
            mu,log_sigma = ar_layer(z,self.hps,n_hidden=self.n_hidden)
        log_sigma = tf.clip_by_value(log_sigma,-5,5)
        if not self.hps.ignore_sigma_flow:
            y = z * tf.exp(log_sigma) + mu
            log_det = -1 * log_sigma
        else:
            y = z + mu
            log_det = 0.0
        return y,log_det
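In the permutation branch above, np.append([0], perm) simply prepends the fixed batch axis to a random permutation of the latent dimensions before tf.transpose is called. The numpy part in isolation (z_size chosen for illustration):

import numpy as np

z_size = 4
perm = np.random.permutation(z_size) + 1   # shuffle axes 1..z_size, leave batch axis 0 alone
print(np.append([0], perm))                # e.g. [0 3 1 4 2]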
Project: typing-pattern-recognition    Author: abinashmeher999    | Project source | File source
def keyReleaseEvent(self, event):
        self.outerclass.end_time = np.append(self.outerclass.end_time, time.time())
        if event.key() == QtCore.Qt.Key_Return:
            if self.text() == self.outerclass.pwd:
                self.outerclass.timing_vector = np.empty((0,), dtype=np.float64)
                i = 0
                # print self.outerclass.end_time.size
                while i < self.outerclass.end_time.size - 1:
                    self.outerclass.timing_vector = np.append(self.outerclass.timing_vector, self.outerclass.start_time[i] - self.outerclass.end_time[i])
                    self.outerclass.timing_vector = np.append(self.outerclass.timing_vector, self.outerclass.end_time[i+1] - self.outerclass.start_time[i])
                    i += 1
                self.outerclass.timing_vector = np.append(self.outerclass.timing_vector, self.outerclass.start_time[i] - self.outerclass.end_time[i])
                print self.outerclass.start_time
                print self.outerclass.end_time
                print self.outerclass.timing_vector
                self.outerclass.tv_list.append(np.array(self.outerclass.timing_vector))
                self.outerclass.start_time = np.empty((0,), dtype=np.float64)
                self.outerclass.end_time = np.empty((0,), dtype=np.float64)
                self.outerclass.timing_vector = np.empty((0,), dtype=np.float64)
                self.clear()
            else:
                self.outerclass.end_time = np.empty((0,), dtype=np.float64)
                self.clear()
        # print "Key released"
        QtGui.QLineEdit.keyReleaseEvent(self, event)
Project: mx-rfcn    Author: giorking    | Project source | File source
def get_image_array(roidb, scales, scale_indexes, need_mean=True):
    """
    build image array from specific roidb
    :param roidb: images to be processed
    :param scales: scale list
    :param scale_indexes: indexes
    :return: array [b, c, h, w], list of scales
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = scales[scale_indexes[i]]
        im, im_scale = image_processing.resize(im, target_size, config.MAX_SIZE)
        im_tensor = image_processing.transform(im, config.PIXEL_MEANS, need_mean=need_mean)
        processed_ims.append(im_tensor)
        im_scales.append(im_scale)
    array = image_processing.tensor_vstack(processed_ims)
    return array, im_scales
Project: adversarial-frcnn    Author: xiaolonw    | Project source | File source
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in xrange(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales
Project: dsb3    Author: EliasVansteenkiste    | Project source | File source
def extract_candidates(predictions_scan, tf_matrix, pid, outputs_path):
    print 'computing blobs'
    start_time = time.time()
    blobs = blobs_detection.blob_dog(predictions_scan[0, 0], min_sigma=1, max_sigma=15, threshold=0.1)
    print 'blobs computation time:', (time.time() - start_time) / 60.
    print 'n blobs detected:', blobs.shape[0]

    blobs_original_voxel_coords = []
    for j in xrange(blobs.shape[0]):
        blob_j = np.append(blobs[j, :3], [1])
        blob_j_original = tf_matrix.dot(blob_j)
        blobs_original_voxel_coords.append(blob_j_original)

    blobs = np.asarray(blobs_original_voxel_coords)
    print blobs.shape
    utils.save_pkl(blobs, outputs_path + '/%s.pkl' % pid)
Project: dsb3    Author: EliasVansteenkiste    | Project source | File source
def extract_candidates(predictions_scan, tf_matrix, pid, outputs_path):
    print 'computing blobs'
    start_time = time.time()
    blobs = blobs_detection.blob_dog(predictions_scan[0, 0], min_sigma=1, max_sigma=15, threshold=0.1)
    print 'blobs computation time:', (time.time() - start_time) / 60.
    print 'n blobs detected:', blobs.shape[0]

    blobs_original_voxel_coords = []
    for j in xrange(blobs.shape[0]):
        blob_j = np.append(blobs[j, :3], [1])
        blob_j_original = tf_matrix.dot(blob_j)
        blobs_original_voxel_coords.append(blob_j_original)

    blobs = np.asarray(blobs_original_voxel_coords)
    print blobs.shape
    utils.save_pkl(blobs, outputs_path + '/%s.pkl' % pid)
Project: gbrs    Author: churchill-lab    | Project source | File source
def get_genotype_probability(aln_profile, aln_specificity, sigma=0.12):
    # 'aln_specificity' should be a set of unit vectors (at least one of the entries is larger than 1.)
    num_haps = len(aln_profile)
    aln_vec = unit_vector(aln_profile)
    genoprob = []
    for i in xrange(num_haps):
        v1 = unit_vector(aln_specificity[i])
        for j in xrange(i, num_haps):
            if j == i:
                genoprob.append(sum(np.power(aln_vec - v1, 2))) # homozygotes
            else:
                v2 = unit_vector(aln_specificity[j])
                geno_vec = unit_vector(v1 + v2)
                # compute directional similarity
                genoprob.append(sum(np.power(aln_vec - geno_vec, 2))) # for heterozygotes
    genoprob = np.exp(np.array(genoprob) / (-2 * sigma * sigma))
    return np.array(genoprob / sum(genoprob))
Project: monogreedy    Author: jinjunqi    | Project source | File source
def update_eva_history(eva_history, eva_candidate):

    for i in range(len(eva_candidate)):
        phi = eva_candidate[i]

        continue_flag = 0
        for j in range(len(eva_history.phi)):
            if numpy.sum(numpy.abs(phi - eva_history.phi[j])) < 1e-4:
                continue_flag = 1
                break
        if continue_flag == 1:
            continue

        eva_history.phi.append(phi.tolist())
        eva_history.time.append(eva_a_time(phi))
        eva_history.acc.append(eva_a_acc(phi))
Project: monogreedy    Author: jinjunqi    | Project source | File source
def tune_tal(mono_phi_score, tal_list):
    errs = []
    tals = []
    for tal in tal_list:
        err = []
        for i in range(len(mono_phi_score)):
            mono_1 = numpy.delete(mono_phi_score, i, axis=0)
            dim_h = mono_phi_score[i][:-1]
            value_h, alpha = train_predict_regression(mono_1, dim_h, tal)
            err.append((value_h - mono_phi_score[i][-1])**2)
        err = numpy.mean(err)

        errs.append(err)
        tals.append(tal)
        print 'regression tal:', tal, 'err', err

    idx = numpy.argmin(errs)

    return tals[idx]
Project: SPOT    Author: Amossys-team    | Project source | File source
def add(self,data):
        """
        This function allows appending data to the already fitted data

        Parameters
        ----------
        data : list, numpy.array, pandas.Series
            data to append
        """
        if isinstance(data,list):
            data = np.array(data)
        elif isinstance(data,np.ndarray):
            data = data
        elif isinstance(data,pd.Series):
            data = data.values
        else:
            print('This data format (%s) is not supported' % type(data))
            return

        self.data = np.append(self.data,data)
        return
Project: SPOT    Author: Amossys-team    | Project source | File source
def add(self,data):
        """
        This function allows appending data to the already fitted data

        Parameters
        ----------
        data : list, numpy.array, pandas.Series
            data to append
        """
        if isinstance(data,list):
            data = np.array(data)
        elif isinstance(data,np.ndarray):
            data = data
        elif isinstance(data,pd.Series):
            data = data.values
        else:
            print('This data format (%s) is not supported' % type(data))
            return

        self.data = np.append(self.data,data)
        return
Project: SPOT    Author: Amossys-team    | Project source | File source
def add(self,data):
        """
        This function allows appending data to the already fitted data

        Parameters
        ----------
        data : list, numpy.array, pandas.Series
            data to append
        """
        if isinstance(data,list):
            data = np.array(data)
        elif isinstance(data,np.ndarray):
            data = data
        elif isinstance(data,pd.Series):
            data = data.values
        else:
            print('This data format (%s) is not supported' % type(data))
            return

        self.data = np.append(self.data,data)
        return
Project: faster-rcnn-resnet    Author: Eniac-Xie    | Project source | File source
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in xrange(num_images):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales
Project: pauvre    Author: conchoecia    | Project source | File source
def plotArc(start_angle, stop_angle, radius, width, **kwargs):
    """Return a matplotlib Polygon approximating an annular arc from start_angle to
    stop_angle (in degrees), with inner radius `radius` and radial width `width`,
    together with the coordinates of its inner and outer edges."""
    numsegments = 100
    theta = np.radians(np.linspace(start_angle+90, stop_angle+90, numsegments))
    centerx = 0
    centery = 0
    x1 = -np.cos(theta) * (radius)
    y1 = np.sin(theta) * (radius)
    stack1 = np.column_stack([x1, y1])
    x2 = -np.cos(theta) * (radius + width)
    y2 = np.sin(theta) *  (radius + width)
    stack2 = np.column_stack([np.flip(x2, axis=0), np.flip(y2,axis=0)])
    #add the first values from the first set to close the polygon
    stack2 = np.append(stack2, [[x1[0],y1[0]]], axis=0)  # np.append returns a new array, so assign it back
    arcArray = np.concatenate((stack1,stack2), axis=0)
    return patches.Polygon(arcArray, True, **kwargs), ((x1, y1), (x2, y2))
Project: Parallel.GAMIT    Author: demiangomez    | Project source | File source
def eval(self, t):
        # given a time vector t, return the design matrix column vector(s)

        if self.type is None:
            return np.array([])

        hl = np.zeros((t.shape[0],))
        ht = np.zeros((t.shape[0],))

        if self.type in (0,2):
            hl[t >= self.year] = np.log10(1 + (t[t >= self.year] - self.year) / self.T)

        if self.type in (1,2):
            ht[t >= self.year] = 1

        return np.append(ht,hl) if np.any(hl) else ht
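Note that np.append(ht, hl) is called without an axis, so the two design-matrix columns are flattened and joined end to end into a single vector of length 2 * len(t). A two-line check of that behaviour:

import numpy as np

ht, hl = np.ones(4), np.zeros(4)
print(np.append(ht, hl).shape)   # (8,) -- no axis given, so the result is 1-D, not a 2-column matrix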
Project: Parallel.GAMIT    Author: demiangomez    | Project source | File source
def LoadParameters(self, C):

        s = 0
        for jump in self.table:
            if not jump.type is None:
                if jump.params == 1 and jump.T != 0:
                    jump.a = np.append(jump.a, C[s:s + 1])

                elif jump.params == 1 and jump.T == 0:
                    jump.b = np.append(jump.b, C[s:s + 1])

                elif jump.params == 2:
                    jump.b = np.append(jump.b, C[s:s + 1])
                    jump.a = np.append(jump.a, C[s + 1:s + 2])

                s = s + jump.params
Project: SentEval    Author: facebookresearch    | Project source | File source
def trainepoch(self, X, y, epoch_size=1):
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + epoch_size):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.LongTensor(permutation[i:i + self.batch_size])
                if isinstance(X, torch.cuda.FloatTensor):
                    idx = idx.cuda()
                Xbatch = Variable(X.index_select(0, idx))
                ybatch = Variable(y.index_select(0, idx))
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data[0])
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += epoch_size
Project: tnt    Author: pytorch    | Project source | File source
def add(self, output, target):
        if torch.is_tensor(output):
            output = output.cpu().squeeze().numpy()
        if torch.is_tensor(target):
            target = target.cpu().squeeze().numpy()
        elif isinstance(target, numbers.Number):
            target = np.asarray([target])
        assert np.ndim(output) == 1, \
            'wrong output size (1D expected)'
        assert np.ndim(target) == 1, \
            'wrong target size (1D expected)'
        assert output.shape[0] == target.shape[0], \
            'number of outputs and targets does not match'
        assert np.all(np.add(np.equal(target, 1), np.equal(target, 0))), \
            'targets should be binary (0, 1)'

        self.scores = np.append(self.scores, output)
        self.targets = np.append(self.targets, target)
Project: combine-DT-with-NN-in-RL    Author: Burning-Bear    | Project source | File source
    def set_replay_buffer(self,record):
        """After getting the reward from the environment, the agent should add a new record to the replay buffer.

        Args:
            record: dict, containing at least the following keys:
                'reward':
                'terminal':
                'next_observation':
        """
        new_state = self.observation2state(record['observation'])
        if type(self.current_state) == dict:
            raise Exception("current state type error")

        self.replay_buffer.add(self.current_state, record['action'], record['reward'], new_state,
                               float(record['terminal']), self.current_feature, record['target_ob'])
        # self.replayMemory.append([self.current_state,record['action'],record['reward'],new_state,record['terminal'],record['feature']])
        # if len(self.replayMemory) > REPLAY_MEMORY:
        #     self.replayMemory.popleft()
        self.current_state = new_state
        self.current_feature = list_to_dic(record['observation'])
Project: OASIS    Author: j-friedrich    | Project source | File source
def plot_trace(n=0, lg=False):
    plt.plot(trueC[n], c=col[2], clip_on=False, zorder=5, label='Truth')
    plt.plot(solution, c=col[0], clip_on=False, zorder=7, label='Estimate')
    plt.plot(y, c=col[7], alpha=.7, lw=1, clip_on=False, zorder=-10, label='Data')
    if lg:
        plt.legend(frameon=False, ncol=3, loc=(.1, .62), columnspacing=.8)
    spks = np.append(0, solution[1:] - g * solution[:-1])
    plt.text(800, 2.2, 'Correlation: %.3f' % (np.corrcoef(trueSpikes[n], spks)[0, 1]), size=24)
    plt.gca().set_xticklabels([])
    simpleaxis(plt.gca())
    plt.ylim(0, 2.85)
    plt.xlim(0, 1500)
    plt.yticks([0, 2], [0, 2])
    plt.xticks([300, 600, 900, 1200], ['', ''])


# init params
Project: prbg    Author: Lakate    | Project source | File source
def longestrunones8(binin):
    ''' The focus of the test is the longest run of ones within M-bit blocks. The purpose of this test is to determine whether the length of the longest run of ones within the tested sequence is consistent with the length of the longest run of ones that would be expected in a random sequence. Note that an irregularity in the expected length of the longest run of ones implies that there is also an irregularity in the expected length of the longest run of zeroes. Long runs of zeroes were not evaluated separately due to a concern about statistical independence among the tests.'''
    m = 8
    k = 3
    pik = [0.2148, 0.3672, 0.2305, 0.1875]
    blocks = [binin[xs*m:m+xs*m:] for xs in xrange(len(binin) / m)]
    n = len(blocks)
    counts1 = [xs+'01' for xs in blocks] # append the string 01 to guarantee the length of 1
    counts = [xs.replace('0',' ').split() for xs in counts1] # split into all parts
    counts2 = [map(len, xx) for xx in counts]
    counts4 = [(4 if xx > 4 else xx) for xx in map(max,counts2)]
    freqs = [counts4.count(spi) for spi in [1, 2, 3, 4]]
    chisqr1 = [(freqs[xx]-n*pik[xx])**2/(n*pik[xx]) for xx in xrange(4)]
    chisqr = reduce(su, chisqr1)
    pval = spc.gammaincc(k / 2.0, chisqr / 2.0)
    return pval
Project: TDOSE    Author: kasperschmidt    | Project source | File source
def SExtractorCat2fits(sextractorfiles,stringcols=[1],header=73,verbose=True):
    """
    Convert an ascii catalog with columns defined in a header in the SExtractor format, i.e. one column
    name per row preceded by a "#" and a column number, and followed by a description (or any ascii file
    with the given setup), to a fits binary table.

    --- INPUT ---
    sextractorfiles   List of ascii files to convert to fits
    stringcols        Columns to use a string format for (all other columns will be set to double float)
    header            Header containing the column names of the catalogs following the "SExtractor notation"
    verbose           Toggle verbosity

    --- EXAMPLE OF USE ---
    import glob
    import tdose_utilities as tu
    catalogs = glob.glob('/Volumes/DATABCKUP2/MUSE-Wide/catalogs_photometry/catalog_photometry_candels-cdfs-*.cat')
    tu.SExtractorCat2fits(catalogs,stringcols=[1],header=73,verbose=True)

    """
    for sexcat_ascii in sextractorfiles:
        asciiinfo = open(sexcat_ascii,'r')
        photcols = []
        for line in asciiinfo:
            if line.startswith('#'):
                colname = line.split()[2]
                photcols.append(colname)

        photfmt = ['D']*len(photcols)
        for stringcol in stringcols:
            photfmt[stringcol] = 'A60'

        sexcat_fits   = tu.ascii2fits(sexcat_ascii,asciinames=photcols,skip_header=header,fitsformat=photfmt,verbose=verbose)

# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: confusion    Author: abhimanyudubey    | Project source | File source
def layers(net):
    out = []
    for elem in net['layer']:
        out.append(elem['name'])
        for x in elem['top']:
            out.append(x)
    return set(out)
Project: confusion    Author: abhimanyudubey    | Project source | File source
def get_layer(net,layer_name):
    out = []
    for elem in net['layer']:
        out.append(elem['name'])
    out_ind = []
    for i,elem in enumerate(out):
        if elem == layer_name:
            out_ind.append(i)
    return out_ind
Project: RasterFairy    Author: Quasimondo    | Project source | File source
def warpCloud( xyc, sourceGridPoints, targetGridPoints, warpQuality=9 ):

    sourceTree = KDTree(sourceGridPoints, leafsize=10)
    warpedXYC = []  
    for c in xyc:
        nearestEdge = sourceTree.query(c,k=warpQuality)
        nx = 0.0
        ny = 0.0
        ws = 0.0
        for i in range(warpQuality):
            p = targetGridPoints[nearestEdge[1][i]]
            w = nearestEdge[0][i]
            if w == 0.0:
                nx = p[0]
                ny = p[1]
                ws = 1.0
                break
            else:
                w = 1.0 / w
                nx += w * p[0]
                ny += w * p[1]
                ws += w

        warpedXYC.append([nx/ws,ny/ws])

    warpedXYC = np.array(warpedXYC)
    return warpedXYC
Project: RasterFairy    Author: Quasimondo    | Project source | File source
def getCoonsGrid( bounds, width=64, height=64, densities=None, paddingScale=1.0):

    targets = []
    for yi in range(height):
        for xi in range(width):
            targets.append(getCoonsPatchPointBez(bounds,xi,yi,width,height,densities=densities))

    targets = np.array(targets)
    tmean = [np.mean(targets[:,0]),np.mean(targets[:,1])]
    targets -= tmean
    targets *= paddingScale
    targets += tmean

    return targets
Project: zipline-chinese    Author: zhanghan1990    | Project source | File source
def __repr__(self):
        statements = []
        for metric in self.METRIC_NAMES:
            value = getattr(self, metric)[-1]
            if isinstance(value, list):
                if len(value) == 0:
                    value = np.nan
                else:
                    value = value[-1]
            statements.append("{m}:{v}".format(m=metric, v=value))

        return '\n'.join(statements)
Project: cellranger    Author: 10XGenomics    | Project source | File source
def load_chunk(group, col_start, col_end):
        ''' Load a submatrix specified by the given column (barcode) range from an h5 group
        Args: col_start, col_end - half-open interval of column indices to load'''
        # Check bounds
        shape = getattr(group, cr_constants.H5_MATRIX_SHAPE_ATTR).read()
        assert col_start >= 0 and col_start < shape[1]
        assert col_end >= 0 and col_end <= shape[1]

        # Load genes and barcodes
        genes = GeneBCMatrix.load_genes_from_h5_group(group)
        bcs = GeneBCMatrix.load_bcs_from_h5_group(group)[col_start:col_end]
        matrix = GeneBCMatrix(genes, bcs)

        # Get views into full matrix
        data = getattr(group, cr_constants.H5_MATRIX_DATA_ATTR)
        indices = getattr(group, cr_constants.H5_MATRIX_INDICES_ATTR)
        indptr = getattr(group, cr_constants.H5_MATRIX_INDPTR_ATTR)

        # Determine extents of selected columns
        ind_start = indptr[col_start]
        if col_end < len(indptr)-1:
            # Last index (end-exclusive) is the start of the next column
            ind_end = indptr[col_end]
        else:
            # Last index is the last index in the matrix
            ind_end = len(data)

        chunk_data = data[ind_start:ind_end]
        chunk_indices = indices[ind_start:ind_end]
        chunk_indptr = np.append(indptr[col_start:col_end], ind_end) - ind_start
        chunk_shape = (shape[0], col_end - col_start)

        matrix.m = sp_sparse.csc_matrix((chunk_data, chunk_indices, chunk_indptr), shape=chunk_shape)

        return matrix
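The indptr arithmetic above is the usual way to cut a column range out of a CSC matrix without densifying it: take the indptr entries of the selected columns, use np.append() to add the end pointer, and re-base everything at the first pointer. A small self-contained check of that pattern (the random test matrix is an assumption, not cellranger data):

import numpy as np
import scipy.sparse as sp_sparse

full = sp_sparse.random(5, 8, density=0.4, format='csc', random_state=0)
col_start, col_end = 2, 6
indptr = full.indptr
ind_start, ind_end = indptr[col_start], indptr[col_end]

chunk_indptr = np.append(indptr[col_start:col_end], ind_end) - ind_start
chunk = sp_sparse.csc_matrix((full.data[ind_start:ind_end],
                              full.indices[ind_start:ind_end],
                              chunk_indptr),
                             shape=(full.shape[0], col_end - col_start))
print(np.array_equal(chunk.toarray(), full[:, col_start:col_end].toarray()))   # True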
Project: cellranger    Author: 10XGenomics    | Project source | File source
def load_genomes_from_h5(filename):
        genomes = []
        with tables.open_file(filename, 'r') as f:
            for group in f.list_nodes(f.root):
                genome = group._v_name
                genomes.append(genome)
        return genomes