Python numpy module: NAN code examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.NAN.
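
A minimal sketch of the pattern shared by the examples below (this sketch is ours, not taken from any of the listed projects): numpy.NAN is an alias for numpy.nan, the IEEE-754 not-a-number value. It is typically used to pre-fill freshly allocated arrays so that entries which were never written can later be detected with np.isnan, since NaN never compares equal to anything, including itself.

import numpy as np

values = np.empty(5)
values[:] = np.NAN           # pre-fill with NaN to mark "not yet computed"
values[2] = 1.0

print(np.isnan(values))      # [ True  True False  True  True]
print(np.NAN == np.NAN)      # False: NaN never compares equal, use np.isnan instead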

Project: bayestsa    Author: thalesians
def __init__(self, x0, P0, Q, R, cor, f, h):        
        self.Q = Q
        self.R = R
        self.cor = cor
        self.fa = lambda col: f(col[0], col[2])
        self.ha = lambda col: h(col[0], col[1])

        Pxx = P0
        Pxv = 0.
        self.xa = np.array( ((x0,), (0.,), (0.,), (0.,)) )
        self.Pa = np.array( ((Pxx, Pxv   , 0.      , 0.      ),
                             (Pxv, self.R, 0.      , 0.      ),
                             (0. , 0.    , self.Q  , self.cor),
                             (0. , 0.    , self.cor, self.R  )) )

        self.lastobservation = np.NAN
        self.predictedobservation = np.NAN
        self.innov = np.NAN
        self.innovcov = np.NAN
        self.gain = np.NAN

        self.loglikelihood = 0.0
Project: diluvian    Author: aschampion
def remask(self):
        """Reset the mask based on the seeded connected component.
        """
        body = self.to_body()
        if not body.is_seed_in_mask():
            return False
        new_mask_bin, bounds = body.get_seeded_component(CONFIG.postprocessing.closing_shape)
        new_mask_bin = new_mask_bin.astype(np.bool)

        mask_block = self.mask[map(slice, bounds[0], bounds[1])].copy()
        # Clip any values not in the seeded connected component so that they
        # cannot generate moves when rechecking.
        mask_block[~new_mask_bin] = np.clip(mask_block[~new_mask_bin], None, 0.9 * CONFIG.model.t_move)

        self.mask[:] = np.NAN
        self.mask[map(slice, bounds[0], bounds[1])] = mask_block
        return True
Project: pyktrader2    Author: harveywwu
def MA_RIBBON(df, ma_series):
    ma_array = np.zeros([len(df), len(ma_series)])
    ema_list = []
    for idx, ma_len in enumerate(ma_series):
        ema_i = EMA(df, n = ma_len, field = 'close')
        ma_array[:, idx] = ema_i
        ema_list.append(ema_i)
    corr = np.empty([len(df)])
    pval = np.empty([len(df)])
    dist = np.empty([len(df)])
    corr[:] = np.NAN
    pval[:] = np.NAN
    dist[:] = np.NAN
    max_n = max(ma_series)
    for idy in range(len(df)):
        if idy >= max_n - 1:
            corr[idy], pval[idy] = stats.spearmanr(ma_array[idy,:], range(len(ma_series), 0, -1))
            dist[idy] = max(ma_array[idy,:]) - min(ma_array[idy,:])
    corr_ts = pd.Series(corr*100, index = df.index, name = "MARIBBON_CORR")
    pval_ts = pd.Series(pval*100, index = df.index, name = "MARIBBON_PVAL")
    dist_ts = pd.Series(dist, index = df.index, name = "MARIBBON_DIST")
    return pd.concat([corr_ts, pval_ts, dist_ts] + ema_list, join='outer', axis=1)
Project: finance_news_analysis    Author: pskun
def read_csv(filename, skip_lines=0):
    csvfile = file(filename, 'rb')
    reader = csv.reader(csvfile)
    data = np.empty(0, dtype=object)
    last_count = np.NAN
    for line in reader:
        if skip_lines > 0:
            skip_lines = skip_lines - 1
            continue
        if data.size > 0:
            if len(line) != last_count:
                raise Exception('unequal columns found')
            data = np.c_[data, line]
            last_count = len(line)
        else:
            data = np.array(line, dtype=object)
            data = data.reshape(len(data), 1)
            last_count = len(line)
    csvfile.close()
    return data.T
Project: Py2DSpectroscopy    Author: SvenBo90
def clear_fit(self, **kwargs):

        # if no pixel was provided the current pixel is updated
        if 'pixel' not in kwargs.keys() or kwargs['pixel'] == -1:
            px = self._focus[0]
        else:
            px = kwargs['pixel'][0]

        # clear fit
        self._fit_functions[px, :] = numpy.zeros(6)
        self._fit_initial_parameters[px, :, :] = numpy.NAN
        self._fit_optimized_parameters[px, :, :] = numpy.NAN

        # emit signal
        if 'emit' not in kwargs or kwargs['emit']:
            self._app.fit_changed.emit(self._id)

    # TODO: Flip for 1D
Project: Py2DSpectroscopy    Author: SvenBo90
def clear_fit(self, **kwargs):

        # if no pixel was provided the current pixel is updated
        if 'pixel' not in kwargs.keys() or kwargs['pixel'] == -1:
            px = self._focus[0]
            py = self._focus[1]
        else:
            px = kwargs['pixel'][0]
            py = kwargs['pixel'][1]

        # clear fit
        self._fit_functions[px, py, :] = numpy.zeros(6)
        self._fit_initial_parameters[px, py, :, :] = numpy.NAN
        self._fit_optimized_parameters[px, py, :, :] = numpy.NAN

        # emit signal
        if 'emit' not in kwargs or kwargs['emit']:
            self._app.fit_changed.emit(self._id)
Project: FOLLOW    Author: adityagilra
def rasterplot(ax,trange,tstart,tend,spikesOut,n_neurons,colors=['r','b'],\
                            size=2.5,marker='.',sort=False):
    spikesPlot = []
    for i in n_neurons:
        spikesti = trange[spikesOut[:, i] > 0].ravel()
        spikesti = spikesti[np.where((spikesti>tstart) & (spikesti<tend))]
        if len(spikesti)==0: spikesPlot.append([np.NAN])
        else: spikesPlot.append(spikesti)
    if sort:
        idxs = np.argsort(
                [spikesPlot[i][0] for i in range(len(spikesPlot))] )
        idxs = idxs[::-1]                           # reverse sorted in time to first spike
    else: idxs = range(len(n_neurons))
    for i,idx in enumerate(idxs):
        ax.scatter(spikesPlot[idx],[i+1]*len(spikesPlot[idx]),\
                        marker=marker,s=size,\
                        facecolor=colors[i%2],lw=0,clip_on=False)
    ax.set_ylim((1,len(n_neurons)))
    ax.set_xlim((tstart,tend))
    ax.get_xaxis().get_major_formatter().set_useOffset(False)
Project: nanoraw    Author: marcus1487
def get_read_reg_events(r_data, interval_start, num_bases):
    r_means = nh.get_read_base_means(r_data)
    if r_data.start > interval_start:
        # handle reads that start in middle of region
        start_overlap = interval_start + num_bases - r_data.start
        # create region with nan values
        region_means = np.empty(num_bases)
        region_means[:] = np.NAN
        region_means[-start_overlap:] = r_means[:start_overlap]
    elif r_data.end < interval_start + num_bases:
        # handle reads that end inside region
        end_overlap = r_data.end - interval_start
        # create region with nan values
        region_means = np.empty(num_bases)
        region_means[:] = np.NAN
        region_means[:end_overlap] = r_means[-end_overlap:]
    else:
        skipped_bases = interval_start - r_data.start
        region_means = r_means[
            skipped_bases:skipped_bases + num_bases]

    return region_means
Project: eTraGo    Author: openego
def linkage(df, n_groups):
    # create the distance matrix based on the Frobenius norm: |A-B|_F, where A is
    # a 24 x N matrix with N the number of time series inside the dataframe df
    # TODO: we could save half the time, since the distance matrix is symmetric
    # and only the upper triangle is needed
    if True:
        Y = np.empty((n_groups, n_groups,))
        Y[:] = np.NAN
        for i in range(len(Y)):
            for j in range(len(Y[i,:])):
                A = df.loc[i+1].values
                B = df.loc[j+1].values
                #print('Computing distance of:{},{}'.format(i,j))
                Y[i,j] = norm(A-B, ord='fro')

    # condensed distance matrix as vector for linkage (upper triangle as a vector)
    y = Y[np.triu_indices(n_groups, 1)]
    # create linkage matrix with Ward's algorithm and the Euclidean norm
    Z = hac.linkage(y, method='ward', metric='euclidean')
    # R = hac.inconsistent(Z, d=10)
    return Z
Project: eTraGo    Author: openego
def fcluster(df, Z, n_groups, n_clusters):
    """
    """
    # create flat cluster, i.e. maximal number of clusters...
    T = hac.fcluster(Z, criterion='maxclust', depth=2, t=n_clusters)

    # add cluster id to original dataframe
    df['cluster_id'] = np.NAN
    # group is either days (1-365) or weeks (1-52)

    #for d in df.index.get_level_values('group').unique():
    for g in range(1, n_groups+1):
        # T[g-1] because df.index is e.g. 1-365 while T is 0-indexed (0...364)
        df.ix[g, 'cluster_id'] = T[g-1]
    # add the cluster id to the index
    df.set_index(['cluster_id'], append=True, inplace=True)
    # set cluster id as first index level for easier looping through cluster_ids
    df.index = df.index.swaplevel(0, 'cluster_id')
    # just to have datetime at the last level of the multiindex df
    df.index = df.index.swaplevel('datetime', 'group')

    return df
Project: bayesianpy    Author: morganics
def set_value(self, column, value):
        if column not in self._columns:
            if isinstance(value, str):
                self._columns.update({
                    column: np.empty((1, len(self._df)), dtype="object")
                })
                self._columns[column][:] = ""
            elif isinstance(value, bool):
                self._columns.update({
                    column: np.empty((1, len(self._df)), dtype="bool")
                })
                self._columns[column][:] = False
            else:
                self._columns.update({
                    column: np.empty((1, len(self._df)))
                })
                self._columns[column][:] = np.NAN

        self._columns[column][0, self._current_row_index] = value
Project: gamtools    Author: pombo-lab
def linkage(counts_table):
    """
    Return the linkage disequilibrium (D) for an arbitrary number of
    loci given their contingency table.
    """

    probs_table = frequency_to_probability(counts_table)
    marginal_probs = get_marginal_probabilities(probs_table)

    if either_locus_not_detected(marginal_probs):
        return np.NAN

    exp_freqs = marginal_probs.prod(axis=0)[0]
    observed = probs_table.flat[-1]
    if observed == 0:
        return np.NAN
    return observed - exp_freqs
Project: bayestsa    Author: thalesians
def generate(self):
        self.__validate()
        self.__generatenoises()
        self.__generatejumps()
        processcount = len(self.__data._processnames)
        self.__data._processes = np.empty((self.__data._timecount, processcount))
        self.__data._processes[:] = np.NAN
        for time in range(self.__data._timecount):
            for pi, (pn, pf) in enumerate(zip(self.__data._processnames, self.__processfuncs)):
                self.__data._processes[time, pi] = pf(time, pn, self.__data)
        return self.__data.copy()
Project: bayestsa    Author: thalesians
def tonumpyarray(self, fill=None, symmetric=False):
        import numpy as np
        if fill is None: fill = np.NAN
        res = np.empty((self.__dim, self.__dim))
        idx = 0
        for i in range(self.__dim):
            for j in range(i+1):
                res[i,j] = self._data[idx]
                if symmetric: res[j,i] = res[i,j]
                idx += 1
            if not symmetric: res[i,i+1:self.__dim] = fill
        return res
Project: bayestsa    Author: thalesians
def tonumpyarray(self, fill=None, symmetric=False):
        import numpy as np
        if fill is None: fill = np.NAN
        res = np.empty((self.__dim, self.__dim))
        idx = 0
        for i in range(self.__dim):
            for j in range(i):
                res[i,j] = self._data[idx]
                if symmetric: res[j,i] = res[i,j]
                idx += 1
            res[i,i] = fill
            if not symmetric: res[i,i+1:self.__dim] = fill
        return res
Project: bayestsa    Author: thalesians
def __init__(self, x0, P0, params):
        self._params = params
        self.x = x0
        self.P = P0
        self._constterm = self._params.meanlogvar * (1. - self._params.persistence)
        self._cv = self._params.cor * self._params.voloflogvar
        self._cv2 = self._cv * self._cv
        self._p2 = self._params.persistence * self._params.persistence
        self._v2 = self._params.voloflogvar * self._params.voloflogvar
        self.predictedobservation = np.NAN
        self.lastobservation = None
        self.innov = np.NAN
        self.innovcov = np.NAN
        self.gain = np.NAN
        self.loglikelihood = 0.0
Project: uchroma    Author: cyanogen
def _compose_alpha(img_in, img_layer, opacity: float=1.0):
    """
    Calculate alpha composition ratio between two images.
    """
    comp_alpha = np.minimum(img_in[:, :, 3], img_layer[:, :, 3]) * opacity
    new_alpha = img_in[:, :, 3] + (1.0 - img_in[:, :, 3]) * comp_alpha
    np.seterr(divide='ignore', invalid='ignore')
    ratio = comp_alpha / new_alpha
    ratio[np.isnan(ratio)] = 0.0  # NaN never compares equal, so detect it with np.isnan
    return ratio
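
A quick illustration of the NaN-comparison pitfall the snippet above guards against (a small sketch of our own, not part of the uchroma sources): an equality test against np.NAN never matches, so undefined ratios from the zero-alpha division must be caught with np.isnan (or np.nan_to_num).

import numpy as np

ratio = np.array([0.5, np.NAN, 1.0])
print(ratio == np.NAN)       # [False False False]: the NaN entry is not matched
print(np.isnan(ratio))       # [False  True False]: this is how to detect it
ratio[np.isnan(ratio)] = 0.0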
Project: cupy    Author: cupy
def check_binary_nan(self, name, xp, dtype):
        a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2],
                     dtype=dtype)
        b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2],
                     dtype=dtype)
        return getattr(xp, name)(a, b)
Project: cupy    Author: cupy
def check_unary_nan(self, name, xp, dtype):
        a = xp.array(
            [-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, dtype('inf')],
            dtype=dtype)
        return (a,)
Project: cupy    Author: cupy
def check_binary_nan(self, name, xp, dtype):
        a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2],
                     dtype=dtype)
        b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2],
                     dtype=dtype)
        return a, b
Project: cupy    Author: cupy
def check_unary_nan(self, name, xp, dtype):
        a = xp.array(
            [-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, numpy.inf],
            dtype=dtype)
        return getattr(xp, name)(a)
Project: finance_news_analysis    Author: pskun
def fill_symbol_value(symbols, valsyms, vals, fill_value=np.NAN):
    if valsyms.size == 0:
        return np.tile(np.NAN, len(symbols))
    values = np.tile(np.NAN, len(symbols)) if len(vals.shape) == 1 else np.tile(np.NAN, (len(symbols),vals.shape[1]))
    symbol2pos = ustr.get_str2pos_dict(symbols)
    for i in xrange(len(valsyms)):
        try:
            if len(vals.shape) == 1:
                values[symbol2pos[valsyms[i]]] = vals[i]
            else:
                values[symbol2pos[valsyms[i]], :] = vals[i, :]
        except Exception, e:
            pass
    values[np.isnan(values)] = fill_value
    return values
Project: finance_news_analysis    Author: pskun
def get_mat_ewma(tsmat, alpha):
    # EWMA(t) = alpha * ts(t) + (1-alpha) * EWMA(t-1)
    ewma = np.tile(np.NAN, tsmat.shape)
    for i in xrange(1, tsmat.shape[0]):
        init_selection = np.isnan(ewma[i-1, :])
        ewma[i-1, init_selection] = tsmat[i-1, init_selection]
        ewma[i, :] = alpha * tsmat[i, :] + (1-alpha) * ewma[i-1, :]
    return ewma
Project: finance_news_analysis    Author: pskun
def get_mat_movingstd(tsmat, periods):
    mstd = np.empty(shape = tsmat.shape)
    mstd.fill(np.NAN)
    for i in xrange(tsmat.shape[0]):
        j = i - periods + 1
        if j < 0:
            j = 0
        mstd[i,:] = np.nanstd(tsmat[j:i+1,:], 0)
    return mstd
Project: finance_news_analysis    Author: pskun
def get_mat_ma(tsmat, periods):
    ma = np.empty(shape = tsmat.shape)
    ma.fill(np.NAN)
    for i in xrange(tsmat.shape[0]):
        j = i - periods + 1
        if j < 0:
            j = 0
        ma[i,:] = np.nanmean(tsmat[j:i+1,:], 0)
    return ma
Project: finance_news_analysis    Author: pskun
def get_array_ma(ts, periods):
    ma = np.empty(shape = len(ts))
    ma.fill(np.NAN)
    for i in xrange(len(ts)):
        j = i - periods + 1
        if j < 0:
            j = 0
        ma[i] = np.nanmean(ts[j:i+1], 0)
    return ma


# 0 for add, 1 for multiply
Project: finance_news_analysis    Author: pskun
def ret2value(returns):
    values = np.empty(shape=len(returns) + 1)
    values.fill(np.NAN)
    values[0] = 1
    for i in xrange(len(returns)):
        values[i + 1] = values[i] * (1 + returns[i])
    return values
Project: finance_news_analysis    Author: pskun
def retmat2valuemat(retmat):
    valuemat = np.tile(np.NAN, (retmat.shape[0] + 1, retmat.shape[1]))
    for i in xrange(retmat.shape[1]):
        valuemat[:, i] = ret2value(retmat[:, i])
    return valuemat
Project: finance_news_analysis    Author: pskun
def value2ret(values):
    if len(values.shape) == 1:
        prevalues = np.append(np.NAN, values[0:-1])
        returns = (values - prevalues) / prevalues
        returns[np.isinf(returns)] = np.NAN
    else:
        prevalues = np.r_[
            np.tile(np.NAN, (1, values.shape[1])), values[0:-1, :]]
        returns = (values - prevalues) / prevalues
        returns[np.isinf(returns)] = np.NAN
    return returns
Project: finance_news_analysis    Author: pskun
def get_bstrs_pos_in_astrs(astrs, bstrs, astrs2pos={}):
    if not astrs2pos:
        astrs2pos = get_str2pos_dict(astrs)
    pos = np.zeros(shape=len(bstrs))
    count = 0
    for s in bstrs:
        try:
            pos[count] = astrs2pos[s]
        except Exception:
            pos[count] = np.NAN
        count += 1
    return pos
Project: chainer-deconv    Author: germanRos
def check_binary_nan(self, name, xp, dtype):
        a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2],
                     dtype=dtype)
        b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2],
                     dtype=dtype)
        return getattr(xp, name)(a, b)
Project: chainer-deconv    Author: germanRos
def check_unary_nan(self, name, xp, dtype):
        a = xp.array(
            [-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, dtype('inf')],
            dtype=dtype)
        return getattr(xp, name)(a)
Project: dlsd    Author: ahartens
def _create_empty_variables(self):

        total_gap_size = self._get_total_length_of_all_gaps()
        self.new_df = pd.DataFrame(np.empty([self.orig_df.shape[0]+total_gap_size,self.orig_df.shape[1]]))
        self.new_df.iloc[:]=np.NAN
        self.new_time_stamps = []
        self.new_i = 0
Project: dlsd    Author: ahartens
def empty_np_array_with_size(self,rows,columns):
        np_array = np.zeros((rows,columns))
        np_array[:] = np.NAN
        return np_array
Project: Py2DSpectroscopy    Author: SvenBo90
def set_fit(self, fit_functions, fit_initial_parameters, fit_optimized_parameters, **kwargs):

        # if no pixel was provided the current pixel is updated
        if 'pixel' not in kwargs.keys() or kwargs['pixel'] == -1:
            px = self._focus[0]
        else:
            px = kwargs['pixel'][0]

        # clear the old fit data
        self._fit_functions[px, :] = numpy.zeros(6)
        self._fit_initial_parameters[px, :, :] = numpy.NAN
        self._fit_optimized_parameters[px, :, :] = numpy.NAN

        # set new fit data
        i_parameter = 0
        for i_peak in range(len(fit_functions)):
            if fit_functions[i_peak] == 1:
                self._fit_functions[px, i_peak] = 1
                self._fit_initial_parameters[px, i_peak, :3] = fit_initial_parameters[i_parameter:i_parameter+3]
                self._fit_initial_parameters[px, i_peak, 3] = 0
                self._fit_optimized_parameters[px, i_peak, :3] = fit_optimized_parameters[i_parameter:i_parameter+3]
                self._fit_optimized_parameters[px, i_peak, 3] = 0
                i_parameter += 3
            elif fit_functions[i_peak] == 2:
                self._fit_functions[px, i_peak] = 2
                self._fit_initial_parameters[px, i_peak, :3] = fit_initial_parameters[i_parameter:i_parameter+3]
                self._fit_initial_parameters[px, i_peak, 3] = 0
                self._fit_optimized_parameters[px, i_peak, :3] = fit_optimized_parameters[i_parameter:i_parameter+3]
                self._fit_optimized_parameters[px, i_peak, 3] = 0
                i_parameter += 3
            elif fit_functions[i_peak] == 3:
                self._fit_functions[px, i_peak] = 3
                self._fit_initial_parameters[px, i_peak, :] = fit_initial_parameters[i_parameter:i_parameter+4]
                self._fit_optimized_parameters[px, i_peak, :] = fit_optimized_parameters[i_parameter:i_parameter+4]
                i_parameter += 4

        # emit signal
        if 'emit' not in kwargs or kwargs['emit']:
            self._app.fit_changed.emit(self._id)
Project: Py2DSpectroscopy    Author: SvenBo90
def set_fit(self, fit_functions, fit_initial_parameters, fit_optimized_parameters, **kwargs):

        # if no pixel was provided the current pixel is updated
        if 'pixel' not in kwargs.keys() or kwargs['pixel'] == -1:
            px = self._focus[0]
            py = self._focus[1]
        else:
            px = kwargs['pixel'][0]
            py = kwargs['pixel'][1]

        # clear the old fit data
        self._fit_functions[px, py, :] = numpy.zeros(6)
        self._fit_initial_parameters[px, py, :, :] = numpy.NAN
        self._fit_optimized_parameters[px, py, :, :] = numpy.NAN

        # set new fit data
        i_parameter = 0
        for i_peak in range(len(fit_functions)):
            if fit_functions[i_peak] == 1:
                self._fit_functions[px, py, i_peak] = 1
                self._fit_initial_parameters[px, py, i_peak, :3] = fit_initial_parameters[i_parameter:i_parameter+3]
                self._fit_initial_parameters[px, py, i_peak, 3] = 0
                self._fit_optimized_parameters[px, py, i_peak, :3] = fit_optimized_parameters[i_parameter:i_parameter+3]
                self._fit_optimized_parameters[px, py, i_peak, 3] = 0
                i_parameter += 3
            elif fit_functions[i_peak] == 2:
                self._fit_functions[px, py, i_peak] = 2
                self._fit_initial_parameters[px, py, i_peak, :3] = fit_initial_parameters[i_parameter:i_parameter+3]
                self._fit_initial_parameters[px, py, i_peak, 3] = 0
                self._fit_optimized_parameters[px, py, i_peak, :3] = fit_optimized_parameters[i_parameter:i_parameter+3]
                self._fit_optimized_parameters[px, py, i_peak, 3] = 0
                i_parameter += 3
            elif fit_functions[i_peak] == 3:
                self._fit_functions[px, py, i_peak] = 3
                self._fit_initial_parameters[px, py, i_peak, :] = fit_initial_parameters[i_parameter:i_parameter+4]
                self._fit_optimized_parameters[px, py, i_peak, :] = fit_optimized_parameters[i_parameter:i_parameter+4]
                i_parameter += 4

        # emit signal
        if 'emit' not in kwargs or kwargs['emit']:
            self._app.fit_changed.emit(self._id)
Project: blend_modes    Author: flrs
def _compose_alpha(img_in, img_layer, opacity):
    """
    Calculate alpha composition ratio between two images.
    """

    comp_alpha = np.minimum(img_in[:, :, 3], img_layer[:, :, 3])*opacity
    new_alpha = img_in[:, :, 3] + (1.0 - img_in[:, :, 3])*comp_alpha
    np.seterr(divide='ignore', invalid='ignore')
    ratio = comp_alpha/new_alpha
    ratio[np.isnan(ratio)] = 0.0  # NaN never compares equal, so detect it with np.isnan
    return ratio
Project: scikit-discovery    Author: MITHaystack
def process(self, obj_data):
        ''' 
        Overwrites data in the DataWrapper with NaN values

        @param obj_data: Input DataWrapper, which will be modified in place
        '''

        labels = self.labels
        column_names = self.column_names

        for label, data, err in obj_data.getIterator():
            if (labels is None or label in labels) and \
               (column_names is None or data.name in column_names):

                index = data.index

                if self.start is None:
                    start = index[0]
                else:
                    start = self.start

                if self.end is None:
                    end = index[-1]
                else:
                    end = self.end

                new_nans = np.empty(len(data[start:end]))
                new_nans[:] = np.NAN
                new_nans = pd.Series(new_nans, index=data[start:end].index)

                data.loc[start:end] = new_nans
Project: nanoraw    Author: marcus1487
def calc_fishers_method(pos_pvals, offset):
    pvals_np = np.empty(pos_pvals[-1][1] + 1)
    pvals_np[:] = np.NAN
    pvals_np[[list(zip(*pos_pvals)[1])]] = np.maximum(
        zip(*pos_pvals)[0], nh.SMALLEST_PVAL)

    fishers_pvals = [
        _calc_fm_pval(pvals_np[pos - offset:pos + offset + 1])
        if pos - offset >= 0 and
        pos + offset + 1 <= pvals_np.shape[0] and
        not np.any(np.isnan(pvals_np[pos - offset:pos + offset + 1])
                   ) else 1.0
        for _, pos, _, _ in pos_pvals]

    return fishers_pvals
Project: improver    Author: metoppv
def test_single_point_nan(self):
        """Test behaviour for a single NaN grid cell."""
        self.cube.data[0][0][6][7] = np.NAN
        msg = "NaN detected in input cube data"
        with self.assertRaisesRegexp(ValueError, msg):
            neighbourhood_method = CircularNeighbourhood
            NBHood(neighbourhood_method, self.RADIUS).process(self.cube)
Project: improver    Author: metoppv
def test_threshold_point_nan(self):
        """Test behaviour for a single NaN grid cell."""
        # Need to copy the cube as we're adjusting the data.
        self.cube.data[0][2][2] = np.NAN
        msg = "NaN detected in input cube data"
        plugin = Threshold(
            2.0, fuzzy_factor=self.fuzzy_factor, below_thresh_ok=True)
        with self.assertRaisesRegexp(ValueError, msg):
            plugin.process(self.cube)
Project: esmgrids    Author: DoublePrecision
def make_corners(self):

        x = self.x_t
        y = self.y_t

        dx_half = np.empty_like(x)
        dy_half = np.empty_like(x)

        dx_half[:, 1:] = (x[:, 1:] - x[:, 0:-1]) / 2.0
        dy_half[1:, :] = (y[1:, :] - y[0:-1, :]) / 2.0

        # Need to extend South
        dy_half[0, 1:] = dy_half[1, 1:]
        dx_half[0, 1:] = dx_half[1, 1:]

        # and West
        dy_half[:, 0] = dy_half[:, 1]
        dx_half[:, 0] = dx_half[:, 1]

        clon = np.empty((self.num_lat_points, self.num_lon_points, 4))
        clon[:] = np.NAN

        clon[:, :, 0] = x - dx_half
        clon[:, :, 1] = x + dx_half
        clon[:, :, 2] = x + dx_half
        clon[:, :, 3] = x - dx_half
        assert(not np.isnan(np.sum(clon)))

        clat = np.empty((self.num_lat_points, self.num_lon_points, 4))
        clat[:] = np.NAN
        clat[:, :, 0] = y - dy_half
        clat[:, :, 1] = y - dy_half
        clat[:, :, 2] = y + dy_half
        clat[:, :, 3] = y + dy_half
        assert(not np.isnan(np.sum(clat)))

        self.clon_t = clon
        self.clat_t = clat
Project: esmgrids    Author: DoublePrecision
def calc_area_of_polygons(clons, clats):
    """
    Calculate the area of lat-lon polygons.

    We project the sphere onto a flat surface using an equal-area projection
    and then calculate the area of the flat polygon.

    This is slow; we should do some caching to avoid recomputing.
    """

    areas = np.zeros(clons.shape[1:])
    areas[:] = np.NAN

    for j in range(areas.shape[0]):
        for i in range(areas.shape[1]):

            lats = clats[:, j, i]
            lons = clons[:, j, i]

            lat_centre = lats[0] + abs(lats[2] - lats[1]) / 2
            lon_centre = lons[0] + abs(lons[1] - lons[0]) / 2

            pa = pyproj.Proj(proj_str.format(lat_centre, lon_centre))
            x, y = pa(lons, lats)

            cop = {"type": "Polygon", "coordinates": [zip(x, y)]}
            areas[j, i] = shape(cop).area

    assert(not np.isnan(np.sum(areas)))  # an identity check against np.NAN would always pass
    assert(np.min(areas) > 0)

    return areas
Project: combinatorialHiC    Author: VRam142
def cell2val(matrix_list, index):
    pos_index = 0
    matpos = {}
    for i in range(0,index):
        for j in range(0,index):
            if i <= j:
                matpos[(i,j)] = pos_index
                pos_index += 1
    valmatrix = np.zeros(pos_index)
    for matrix in matrix_list:
        newvec = np.zeros(pos_index)
        matrix_open = open(matrix)
        for line in matrix_open:
            bin1, bin2, raw, norm, chrom1, chrom2 = line.split()
#            if chrom1 == chrom2: #Ignore intrachromosomal contacts for now; could also implement as a check to see how close bin1 and bin2 are
#                norm = np.NAN
            coord = (int(bin1), int(bin2))
            pos = matpos[coord]
            if bin1 == bin2 or float(raw) == 0:  # raw is parsed from text, so compare numerically
                newvec[pos] = 0  # set to whichever value you want to compute correlations for (root-normalized coverage, or raw coverage)
            else:
                newvec[pos] = log10(float(raw))
        valmatrix = np.vstack((valmatrix, newvec))
        matrix_open.close()
    valmatrix = np.delete(valmatrix, 0, 0)
    return valmatrix
Project: seqmod    Author: emanjavacas
def _init_pane(self):
        nan = np.array([np.NAN, np.NAN])
        X = np.column_stack([nan] * len(self.legend))
        Y = np.column_stack([nan] * len(self.legend))
        return self.viz.line(
            X=X, Y=Y, env=self.env, opts=self.opts)
Project: ocean-regrid    Author: nicjhan
def make_corners(self):

        # Uses double density grid to figure out corners.
        x = self.x_vt
        y = self.y_vt

        # Corners of t points. Index 0 is bottom left and then
        # anti-clockwise.
        clon = np.empty((self.x_t.shape[0], self.x_t.shape[1], 4))
        clon[:] = np.NAN
        clon[:,:,0] = x[0:-1:2,0:-1:2]
        clon[:,:,1] = x[0:-1:2,2::2]
        clon[:,:,2] = x[2::2,2::2]
        clon[:,:,3] = x[2::2,0:-1:2]
        assert(not np.isnan(np.sum(clon)))

        clat = np.empty((self.x_t.shape[0], self.x_t.shape[1], 4))
        clat[:] = np.NAN
        clat[:,:,0] = y[0:-1:2,0:-1:2]
        clat[:,:,1] = y[0:-1:2,2::2]
        clat[:,:,2] = y[2::2,2::2]
        clat[:,:,3] = y[2::2,0:-1:2]
        assert(not np.isnan(np.sum(clat)))

        self.clon_t = clon
        self.clat_t = clat
Project: ocean-regrid    Author: nicjhan
def make_corners(self):

        x = self.x_t
        y = self.y_t

        dx_half = self.dx / 2.0
        dy_half = self.dy / 2.0

        # Set grid corners. We do these one corner at a time, starting at the
        # bottom left and going anti-clockwise. This is the SCRIP convention.
        clon = np.empty((self.num_lat_points, self.num_lon_points, 4))
        clon[:] = np.NAN
        clon[:,:,0] = x - dx_half
        clon[:,:,1] = x + dx_half
        clon[:,:,2] = x + dx_half
        clon[:,:,3] = x - dx_half
        assert(not np.isnan(np.sum(clon)))

        clat = np.empty((self.num_lat_points, self.num_lon_points, 4))
        clat[:] = np.NAN
        clat[:,:,0] = y - dy_half
        clat[:,:,1] = y - dy_half
        clat[:,:,2] = y + dy_half
        clat[:,:,3] = y + dy_half
        assert(not np.isnan(np.sum(clat)))

        # The bottom latitude band should always be Southern extent.
        assert(np.all(clat[0, :, 0] == np.min(y) - dy_half))
        assert(np.all(clat[0, :, 1] == np.min(y) - dy_half))

        # The top latitude band should always be Northern extent.
        assert(np.all(clat[-1, :, 2] == np.max(y) + dy_half))
        assert(np.all(clat[-1, :, 3] == np.max(y) + dy_half))

        self.clon_t = clon
        self.clat_t = clat
Project: faampy    Author: ncasuk
def __mask_plot_data__(self, arr):
        #tmp = self.Data.variables[key][:].ravel()
        #arr.ravel()[self.Index] = np.NAN
        arr[self.Index] = np.NAN
        return arr
Project: gamtools    Author: pombo-lab
def cosegregation(counts_table):
    """
    Return the co-segregation frequency of n loci given their
    contingency table.
    """

    probs_table = frequency_to_probability(counts_table)

    if either_locus_not_detected(probs_table):
        return np.NAN

    return probs_table.flat[-1]
Project: gamtools    Author: pombo-lab
def expected(counts_table):
    """
    Return the expected co-segregation probability of an arbitrary number
    of loci given their contingency table.
    """

    probs_table = frequency_to_probability(counts_table)
    marginal_probs = get_marginal_probabilities(probs_table)

    if either_locus_not_detected(marginal_probs):
        return np.NAN

    exp_freqs = marginal_probs.prod(axis=0)[0]
    return exp_freqs