Python numpy module: empty_like() code examples

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.empty_like().
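Before the project excerpts, a minimal sketch of the function itself: np.empty_like(a) allocates a new, uninitialized array with the same shape and dtype as a, so it is faster than np.zeros_like, but every element must be written before it is read.

import numpy as np

a = np.arange(6, dtype=np.float64).reshape(2, 3)
b = np.empty_like(a)        # same shape (2, 3) and dtype float64; contents are arbitrary
b[:] = 2 * a                # every element must be assigned before use
print(b.shape, b.dtype)     # (2, 3) float64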

Project: npstreams    Author: LaurentRDC    | project source | file source
def test_out_parameter(self):
        """ Test that the kwargs ``out`` is correctly passed to reduction function """

        with self.subTest('axis = -1'):
            not_out = last(ireduce_ufunc(self.source, np.add, axis = -1))
            out = np.empty_like(self.source[0])
            last(ireduce_ufunc(self.source, ufunc = np.add, out = out))

            self.assertTrue(np.allclose(not_out, out))

        with self.subTest('axis != -1'):
            not_out = last(ireduce_ufunc(self.source, np.add, axis = 2))
            out = np.empty_like(self.source[0])
            from_out = last(ireduce_ufunc(self.source, ufunc = np.add, out = out, axis = 2))

            self.assertTrue(np.allclose(not_out, from_out))
Project: risk-slim    Author: ustunb    | project source | file source
def log_loss_value(Z, weights, total_weights, rho):
    """
    computes the value of the logistic loss in a numerically stable way
    supports non-negative sample weights for each example in the training data
    see http://stackoverflow.com/questions/20085768/

    Parameters
    ----------
    Z               numpy.array containing training data with shape = (n_rows, n_cols)
    rho             numpy.array of coefficients with shape = (n_cols,)
    total_weights   scalar = numpy.sum(weights) (only included to reduce computation)
    weights         numpy.array of sample weights with shape (n_rows,)

    Returns
    -------
    loss_value  scalar = 1/total_weights * sum(weights * log(1 + exp(-Z*rho)))

    """
    scores = Z.dot(rho)
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
    loss_value = loss_value.dot(weights) / total_weights
    return loss_value
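The two-branch log1p form above exists because the naive expression np.log(1 + np.exp(-scores)) overflows for large negative scores. A small sketch of the difference (values chosen only for illustration):

import numpy as np

scores = np.array([-1000.0, 0.0, 1000.0])
with np.errstate(over='ignore'):
    naive = np.log(1.0 + np.exp(-scores))   # exp(1000) overflows -> inf
pos_idx = scores > 0
stable = np.empty_like(scores)
stable[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
stable[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
print(naive)    # [ inf  0.6931...  0. ]
print(stable)   # [ 1000.  0.6931...  0. ]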
Project: risk-slim    Author: ustunb    | project source | file source
def log_loss_value(Z, rho):
    """
    computes the value of the logistic loss in a numerically stable way
    see also: http://stackoverflow.com/questions/20085768/

    Parameters
    ----------
    Z           numpy.array containing training data with shape = (n_rows, n_cols)
    rho         numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    loss_value  scalar = 1/n_rows * sum(log(1 + exp(-Z*rho)))

    """
    scores = Z.dot(rho)
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
    loss_value = loss_value.mean()
    return loss_value
Project: risk-slim    Author: ustunb    | project source | file source
def log_probs(Z, rho):
    """
    compute the predicted probabilities under the logistic model in a numerically stable way

    see also: http://stackoverflow.com/questions/20085768/

    Parameters
    ----------
    Z           numpy.array containing training data with shape = (n_rows, n_cols)
    rho         numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    log_probs   numpy.array of predicted probabilities under the logit model
                (despite the name, these are probabilities, not log-probabilities)
    """

    scores = Z.dot(rho)
    pos_idx = scores > 0
    log_probs = np.empty_like(scores)
    log_probs[pos_idx]  = 1.0 / (1.0 + np.exp(-scores[pos_idx]))
    log_probs[~pos_idx] = np.exp(scores[~pos_idx]) / (1.0 + np.exp(scores[~pos_idx]))
    return log_probs
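The two branches above amount to a numerically stable sigmoid; assuming SciPy is available, scipy.special.expit computes the same quantity:

import numpy as np
from scipy.special import expit

scores = np.array([-50.0, 0.0, 50.0])
pos_idx = scores > 0
probs = np.empty_like(scores)
probs[pos_idx] = 1.0 / (1.0 + np.exp(-scores[pos_idx]))
probs[~pos_idx] = np.exp(scores[~pos_idx]) / (1.0 + np.exp(scores[~pos_idx]))
print(np.allclose(probs, expit(scores)))    # True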
Project: pyro    Author: uber    | project source | file source
def sample(self):
        """
        Draws either a single sample (if alpha.dim() == 1), or one sample per param (if alpha.dim() == 2).

        (Un-reparameterized).

        :param torch.autograd.Variable alpha:
        """
        alpha_np = self.alpha.data.cpu().numpy()
        if self.alpha.dim() == 1:
            x_np = spr.dirichlet.rvs(alpha_np)[0]
        else:
            x_np = np.empty_like(alpha_np)
            for i in range(alpha_np.shape[0]):
                x_np[i, :] = spr.dirichlet.rvs(alpha_np[i, :])[0]
        x = Variable(type(self.alpha.data)(x_np))
        return x
Project: numba-examples    Author: numba    | project source | file source
def black_scholes_numba(stockPrice, optionStrike,
                        optionYears, Riskfree, Volatility):
    callResult = np.empty_like(stockPrice)
    putResult = np.empty_like(stockPrice)

    S = stockPrice
    X = optionStrike
    T = optionYears
    R = Riskfree
    V = Volatility
    for i in range(len(S)):
        sqrtT = math.sqrt(T[i])
        d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
        d2 = d1 - V * sqrtT
        cndd1 = cnd_numba(d1)
        cndd2 = cnd_numba(d2)

        expRT = math.exp((-1. * R) * T[i])
        callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
        putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))

    return callResult, putResult
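cnd_numba (the cumulative normal distribution) is defined elsewhere in that project; a hypothetical stand-in via math.erf, so the snippet can be exercised on its own:

import math

def cnd_numba(d):
    # standard normal CDF via the error function
    # (hypothetical stand-in for the project's own helper)
    return 0.5 * (1.0 + math.erf(d / math.sqrt(2.0)))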
Project: piecewise    Author: DataDog    | project source | file source
def predict(self, t_new):
        """ Use the segments in this model to predict the v value for new t values.
        Params:
            t_new (np.array): t values for which predictions should be made
        Returns:
            np.array of predictions
        """
        v_hats = np.empty_like(t_new, dtype=float)
        for idx, t in enumerate(t_new):
            # Find the applicable segment.
            seg_index = bisect.bisect_left(self._starts, t) - 1
            seg = self.segments[max(0, seg_index)]
            # Use it for prediction
            v_hats[idx] = seg.predict(t)
        return v_hats


## Data structures used during the fitting of the regression in `piecewise()`.


# Segment represents a time range and a linear regression fit through it.
Project: nimo    Author: wolfram2012    | project source | file source
def chrom_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    opp = opp_convert(arr)
    out = np.empty_like(opp[:,:,[0,1]])

    rg = opp[:,:,0]
    by = opp[:,:,1]
    intensity = opp[:,:,2]

    lowi = intensity < 0.1*intensity.max()
    rg[lowi] = 0
    by[lowi] = 0

    denom = intensity
    denom[denom==0] = 1
    out[:,:,0] = rg / denom
    out[:,:,1] = by / denom

    return out

# ------------------------------------------------------------------------------
Project: nimo    Author: wolfram2012    | project source | file source
def rg2_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    out = np.empty_like(arr[:,:,[0,1]])

    red = arr[:,:,0]
    green = arr[:,:,1]
    #blue = arr[:,:,2]
    intensity = arr.mean(2)

    lowi = intensity < 0.1*intensity.max()
    arr[lowi] = 0

    denom = arr.sum(2)
    denom[denom==0] = 1
    out[:,:,0] = red / denom
    out[:,:,1] = green / denom

    return out

# ------------------------------------------------------------------------------
Project: smrt    Author: smrt-model    | project source | file source
def ft_autocorrelation_function(self, k):
        """Compute the 3D Fourier transform of the isotropic correlation
        function for an independent sphere for given magnitude k of the 3D wave vector
        (float).

        """

        X = self.radius * np.asarray(k)
        volume_sphere = 4.0 / 3 * np.pi * self.radius**3

        bessel_term = np.empty_like(X)
        zero_X = np.isclose(X, 0)
        non_zero_X = np.logical_not(zero_X)
        X_non_zero = X[non_zero_X]

        bessel_term[non_zero_X] = (9 * ((np.sin(X_non_zero) - X_non_zero * np.cos(X_non_zero))
                                        / X_non_zero**3)**2)
        bessel_term[zero_X] = 1.0
        return self.corr_func_at_origin * volume_sphere * bessel_term
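The zero_X branch is the small-argument limit: sin(X) - X*cos(X) ~ X**3/3, so the squared bracket tends to (1/3)**2 and the factor 9 makes the whole term 1. A quick numerical check:

import numpy as np

X = np.array([1e-4, 1e-2, 0.5])
bessel_term = 9 * ((np.sin(X) - X * np.cos(X)) / X**3) ** 2
print(bessel_term)    # approaches 1.0 as X -> 0, matching the zero_X branch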
Project: tensorflow-adversarial    Author: gongzhitaao    | project source | file source
def make_deepfool(sess, env, X_data, epochs=1, batch_size=128):
    """
    Generate DeepFool adversarial examples by running env.xadv.
    """
    print('\nMaking adversarials via DeepFool')

    n_sample = X_data.shape[0]
    n_batch = int((n_sample + batch_size - 1) / batch_size)
    X_adv = np.empty_like(X_data)

    for batch in range(n_batch):
        print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        adv = sess.run(env.xadv, feed_dict={env.x: X_data[start:end],
                                            env.adv_epochs: epochs})
        X_adv[start:end] = adv
    print()

    return X_adv
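The pattern in this and the following snippets is generic: preallocate the full output with empty_like, then fill it one batch slice at a time. A framework-free sketch, with apply_batched as an illustrative name and a placeholder perturbation standing in for the sess.run call:

import numpy as np

def apply_batched(fn, X, batch_size=128):
    out = np.empty_like(X)                  # preallocate the full output
    for start in range(0, X.shape[0], batch_size):
        end = min(X.shape[0], start + batch_size)
        out[start:end] = fn(X[start:end])   # fill one batch slice at a time
    return out

X = np.random.rand(300, 32)
X_adv = apply_batched(lambda x: x + 0.01 * np.sign(np.random.randn(*x.shape)), X)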
Project: tensorflow-adversarial    Author: gongzhitaao    | project source | file source
def make_jsma(sess, env, X_data, epochs=0.2, eps=1.0, batch_size=128):
    """
    Generate JSMA by running env.x_jsma.
    """
    print('\nMaking adversarials via JSMA')

    n_sample = X_data.shape[0]
    n_batch = int((n_sample + batch_size - 1) / batch_size)
    X_adv = np.empty_like(X_data)

    for batch in range(n_batch):
        print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        feed_dict = {
            env.x: X_data[start:end],
            env.target: np.random.choice(n_classes),
            env.adv_epochs: epochs,
            env.adv_eps: eps}
        adv = sess.run(env.x_jsma, feed_dict=feed_dict)
        X_adv[start:end] = adv
    print()

    return X_adv
Project: tensorflow-adversarial    Author: gongzhitaao    | project source | file source
def make_fgsm(sess, env, X_data, epochs=1, eps=0.01, batch_size=128):
    """
    Generate FGSM by running env.x_fgsm.
    """
    print('\nMaking adversarials via FGSM')

    n_sample = X_data.shape[0]
    n_batch = int((n_sample + batch_size - 1) / batch_size)
    X_adv = np.empty_like(X_data)

    for batch in range(n_batch):
        print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        adv = sess.run(env.x_fgsm, feed_dict={
            env.x: X_data[start:end],
            env.fgsm_eps: eps,
            env.fgsm_epochs: epochs})
        X_adv[start:end] = adv
    print()

    return X_adv
Project: tensorflow-adversarial    Author: gongzhitaao    | project source | file source
def make_deepfool(sess, env, X_data, epochs=1, eps=0.01, batch_size=128):
    """
    Generate DeepFool adversarial examples by running env.xadv.
    """
    print('\nMaking adversarials via DeepFool')

    n_sample = X_data.shape[0]
    n_batch = int((n_sample + batch_size - 1) / batch_size)
    X_adv = np.empty_like(X_data)

    for batch in range(n_batch):
        print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        adv = sess.run(env.xadv, feed_dict={env.x: X_data[start:end],
                                            env.adv_epochs: epochs})
        X_adv[start:end] = adv
    print()

    return X_adv
Project: tensorflow-adversarial    Author: gongzhitaao    | project source | file source
def make_jsma(sess, env, X_data, y, epochs=0.2, eps=1.0, batch_size=128):
    """
    Generate JSMA by running env.x_jsma.
    """
    print('\nMaking adversarials via JSMA')

    n_sample = X_data.shape[0]
    n_batch = int((n_sample + batch_size - 1) / batch_size)
    X_adv = np.empty_like(X_data)

    for batch in range(n_batch):
        print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
        start = batch * batch_size
        end = min(n_sample, start + batch_size)
        feed_dict = {
            env.x: X_data[start:end],
            env.target: y,
            env.adv_epochs: epochs,
            env.adv_eps: eps}
        adv = sess.run(env.x_jsma, feed_dict=feed_dict)
        X_adv[start:end] = adv
    print()

    return X_adv
Project: gconv_experiments    Author: tscohen    | project source | file source
def dihedral_transform_batch(x):

    g = np.random.randint(low=0, high=8, size=x.shape[0])

    h, w = x.shape[-2:]
    hh = (h - 1) / 2.
    hw = (w - 1) / 2.

    I, J = np.meshgrid(np.linspace(-hh, hh, x.shape[-2]), np.linspace(-hw, hw, x.shape[-1]))
    C = np.r_[[I, J]]
    D4C = np.einsum('...ij,jkl->...ikl', D4, C)
    D4C[:, 0] += hh
    D4C[:, 1] += hw
    D4C = D4C.astype(int)

    x_out = np.empty_like(x)
    for i in range(x.shape[0]):
        I, J = D4C[g[i]]
        x_out[i, :] = x[i][:, J, I]

    return x_out
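D4 is a module-level constant in that project; a hypothetical reconstruction as the eight 2x2 matrices of the dihedral group (four rotations, each optionally composed with a reflection) — the true ordering in the source may differ:

import numpy as np

rot90 = np.array([[0, -1], [1, 0]])
mirror = np.array([[-1, 0], [0, 1]])
D4 = np.array([np.linalg.matrix_power(rot90, k).dot(m)
               for m in (np.eye(2, dtype=int), mirror)
               for k in range(4)])    # shape (8, 2, 2)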
Project: asr_preprocessing    Author: hirofumi0810    | project source | file source
def _delta(feat, N):
    """Compute delta features from a feature vector sequence.
    Args:
        feat: A numpy array of size (NUMFRAMES by number of features)
            containing features. Each row holds 1 feature vector.
        N: For each frame, calculate delta features based on preceding and
            following N frames
    Returns:
        A numpy array of size (NUMFRAMES by number of features) containing
            delta features. Each row holds 1 delta feature vector.
    """
    if N < 1:
        raise ValueError('N must be an integer >= 1')
    NUMFRAMES = len(feat)
    denominator = 2 * sum([i**2 for i in range(1, N + 1)])
    delta_feat = np.empty_like(feat)
    # padded version of feat
    padded = np.pad(feat, ((N, N), (0, 0)), mode='edge')
    for t in range(NUMFRAMES):
        # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
        delta_feat[t] = np.dot(np.arange(-N, N + 1),
                               padded[t: t + 2 * N + 1]) / denominator
    return delta_feat
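The dot product with np.arange(-N, N + 1) is a compact form of the standard delta-feature formula sum_{n=1..N} n * (c_{t+n} - c_{t-n}) / (2 * sum_{n=1..N} n**2). A quick check of the equivalence on toy data:

import numpy as np

feat = np.arange(12, dtype=float).reshape(6, 2)
N, t = 2, 3
denominator = 2 * sum(i**2 for i in range(1, N + 1))
padded = np.pad(feat, ((N, N), (0, 0)), mode='edge')
dot_form = np.dot(np.arange(-N, N + 1), padded[t:t + 2*N + 1]) / denominator
explicit = sum(n * (padded[t + N + n] - padded[t + N - n])
               for n in range(1, N + 1)) / denominator
print(np.allclose(dot_form, explicit))    # True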
Project: bifrost    Author: ledatelescope    | project source | file source
def test_polarisation_products(self):
        n = 89
        real = np.random.randint(-127, 128, size=(n,2)).astype(np.float32)
        imag = np.random.randint(-127, 128, size=(n,2)).astype(np.float32)
        a = real + 1j * imag
        a_orig = a
        a = bf.asarray(a, space='cuda')
        b = bf.empty_like(a)
        for _ in xrange(3):
            bf.map('''
            auto x = a(_,0);
            auto y = a(_,1);
            b(_,0).assign(x.mag2(), y.mag2());
            b(_,1) = x*y.conj();
            ''', shape=b.shape[:-1], data={'a': a, 'b': b})
        b = b.copy('system')
        a = a_orig
        gold = np.empty_like(a)
        def mag2(x):
            return x.real * x.real + x.imag * x.imag
        gold[...,0] = mag2(a[...,0]) + 1j * mag2(a[...,1])
        gold[...,1] = a[...,0] * a[...,1].conj()
        np.testing.assert_equal(b, gold)
Project: Speaker_recognition    Author: Mgajurel    | project source | file source
def delta(feat, N):
    """Compute delta features from a feature vector sequence.

    :param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
    :param N: For each frame, calculate delta features based on preceding and following N frames
    :returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector.
    """
    if N < 1:
        raise ValueError('N must be an integer >= 1')
    NUMFRAMES = len(feat)
    denominator = 2 * sum([i**2 for i in range(1, N+1)])
    delta_feat = numpy.empty_like(feat)
    padded = numpy.pad(feat, ((N, N), (0, 0)), mode='edge')   # padded version of feat
    for t in range(NUMFRAMES):
        delta_feat[t] = numpy.dot(numpy.arange(-N, N+1), padded[t : t+2*N+1]) / denominator   # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
    return delta_feat
Project: Evolutionary-Algorithm    Author: MorvanZhou    | project source | file source
def make_kid(pop, n_kid):
    # generate empty kid holder
    kids = {'DNA': np.empty((n_kid, DNA_SIZE))}
    kids['mut_strength'] = np.empty_like(kids['DNA'])
    for kv, ks in zip(kids['DNA'], kids['mut_strength']):
        # crossover (roughly half p1 and half p2)
        p1, p2 = np.random.choice(np.arange(POP_SIZE), size=2, replace=False)
        cp = np.random.randint(0, 2, DNA_SIZE, dtype=np.bool)  # crossover points
        kv[cp] = pop['DNA'][p1, cp]
        kv[~cp] = pop['DNA'][p2, ~cp]
        ks[cp] = pop['mut_strength'][p1, cp]
        ks[~cp] = pop['mut_strength'][p2, ~cp]

        # mutate (change DNA based on normal distribution)
        ks[:] = np.maximum(ks + (np.random.rand(*ks.shape)-0.5), 0.)    # must > 0
        kv += ks * np.random.randn(*kv.shape)
        kv[:] = np.clip(kv, *DNA_BOUND)    # clip the mutated value
    return kids
Project: arlpy    Author: org-arl    | project source | file source
def invert_map(x):
    """Generate an inverse map.

    :param x: map, such as that generated by :func:`gray_code`
    :returns: an inverse map y, such that ``y[x[j]] = j``

    >>> import arlpy
    >>> x = arlpy.comms.gray_code(8)
    >>> y = arlpy.comms.invert_map(x)
    >>> x[2]
    3
    >>> y[3]
    2
    """
    y = _np.empty_like(x)
    y[x] = _np.arange(len(x))
    return y
Project: hpat    Author: IntelLabs    | project source | file source
def _handle_empty_like(self, lhs, rhs, assign, call_table):
        # B = empty_like(A) -> B = empty(len(A), dtype)
        if (rhs.op == 'call'
                and rhs.func.name in call_table
                and call_table[rhs.func.name] == ['empty_like', np]):
            in_arr = rhs.args[0]
            def f(A):
                c = len(A)
            f_block = compile_to_numba_ir(f, {}, self.typingctx, (self.typemap[in_arr.name],),
                                self.typemap, self.calltypes).blocks.popitem()[1]
            replace_arg_nodes(f_block, [in_arr])
            nodes = f_block.body[:-3]  # remove the trailing 'return None' nodes
            size_var = nodes[-1].target
            alloc_nodes = mk_alloc(self.typemap, self.calltypes, assign.target,
                            size_var,
                            self.typemap[in_arr.name].dtype, in_arr.scope, in_arr.loc)
            return nodes + alloc_nodes
        return None
Project: hpat    Author: IntelLabs    | project source | file source
def _handle_df_col_filter(self, lhs_name, rhs, assign):
        # find df['col2'] = df['col1'][arr]
        # since columns should have the same size, output is filled with NaNs
        # TODO: check for float, make sure col1 and col2 are in the same df
        if (rhs.op=='getitem'
                and rhs.value.name in self.df_cols
                and lhs_name in self.df_cols
                and self.is_bool_arr(rhs.index.name)):
            lhs = assign.target
            in_arr = rhs.value
            index_var = rhs.index
            f_blocks = compile_to_numba_ir(_column_filter_impl_float,
                    {'numba': numba, 'np': np}, self.typingctx,
                    (self.typemap[lhs.name], self.typemap[in_arr.name],
                    self.typemap[index_var.name]),
                    self.typemap, self.calltypes).blocks
            first_block = min(f_blocks.keys())
            replace_arg_nodes(f_blocks[first_block], [lhs, in_arr, index_var])
            alloc_nodes = gen_np_call('empty_like', np.empty_like, lhs, [in_arr],
                        self.typingctx, self.typemap, self.calltypes)
            f_blocks[first_block].body = alloc_nodes + f_blocks[first_block].body
            return f_blocks
Project: tensorflow_end2end_speech_recognition    Author: hirofumi0810    | project source | file source
def _delta(feat, N):
    """Compute delta features from a feature vector sequence.
    Args:
        feat: A numpy array of size (NUMFRAMES by number of features)
            containing features. Each row holds 1 feature vector.
        N: For each frame, calculate delta features based on preceding and
            following N frames
    Returns:
        A numpy array of size (NUMFRAMES by number of features) containing
            delta features. Each row holds 1 delta feature vector.
    """
    if N < 1:
        raise ValueError('N must be an integer >= 1')
    NUMFRAMES = len(feat)
    denominator = 2 * sum([i**2 for i in range(1, N + 1)])
    delta_feat = np.empty_like(feat)
    # padded version of feat
    padded = np.pad(feat, ((N, N), (0, 0)), mode='edge')
    for t in range(NUMFRAMES):
        # [t : t+2*N+1] == [(N+t)-N : (N+t)+N+1]
        delta_feat[t] = np.dot(np.arange(-N, N + 1),
                               padded[t: t + 2 * N + 1]) / denominator
    return delta_feat
Project: python_scripting_for_spatial_data_processing    Author: upsdeepak    | project source | file source
def classifyLevel1Assign(classLevel1Img):
    # Create Output Array
    level1 = numpy.empty_like(classLevel1Img, dtype = numpy.dtype('a255'))
    level1[...] = "NA"
    # Non Vegetated
    level1 = numpy.where(numpy.logical_or(classLevel1Img == "NA",
                                          numpy.logical_or(classLevel1Img == "Water",
                                                           classLevel1Img == "Urban")),
                         "Non Vegetated", level1)

    # Vegetated
    level1 = numpy.where(numpy.logical_or(classLevel1Img == "Photosynthetic Vegetated",
                                          numpy.logical_or(classLevel1Img == "Non Photosynthetic Vegetated",
                                                           classLevel1Img == "Non Submerged Aquatic Vegetated")),
                         "Vegetated", level1)

    return level1

# A function for classifying level 2
Project: ML_algorithm    Author: luoshao23    | project source | file source
def init_param(self, nodes_list):
        if self.activation == 'logistic':
            init_bound = lambda inb, outb: np.sqrt(2. / (inb + outb))
        else:
            init_bound = lambda inb, outb: np.sqrt(6. / (inb + outb))

        self.ww = [self._random_state.uniform(-init_bound(nodes_list[i], nodes_list[i + 1]), init_bound(nodes_list[i], nodes_list[i + 1]), (nodes_list[i], nodes_list[i + 1]))
                   for i in xrange(self.layers_ - 1)]

        self.th = [self._random_state.uniform(-init_bound(nodes_list[i], nodes_list[i + 1]), init_bound(nodes_list[i], nodes_list[i + 1]), (nodes_list[i + 1],))
                   for i in xrange(self.layers_ - 1)]

        self.dww = [np.empty_like(w) for w in self.ww]

        self.dww_last = [np.empty_like(w) for w in self.ww]

        self.dth = [np.empty_like(th) for th in self.th]

        self.z = [np.empty_like(th) for th in self.th]

        self.a = [np.empty_like(th) for th in self.th]

        self.ro = [np.empty_like(th) for th in self.th]

        self.delta = [np.empty_like(th) for th in self.th]
Project: cebl    Author: idfah    | project source | file source
def logistic(x, prime=0):
    if prime == 0:
        ##v = np.empty_like(x)
        ##mask = x < 0.0

        ##zl = np.exp(x[mask])
        ##zl = 1.0 / (1.0 + zl)
        ##v[mask] = zl

        ##zh = np.exp(-x[~mask])
        ##zh = zh / (1.0 + zh)
        ##v[~mask] = zh

        v = sps.expit(x)

        return v

    elif prime == 1:
        return logistic(x) * (1.0 - logistic(x))

    else:
        raise NotImplementedError('%d order derivative not implemented.' % int(prime))
Project: cebl    Author: idfah    | project source | file source
def exprect(x, prime=0):
    #v = np.empty_like(x)
    #mask = x >= 0.0
    #nmask = ~mask

    #if prime == 0:
    #    v[mask] = x[mask]
    #    v[nmask] = np.exp(x[nmask]) - 1.0

    #elif prime == 1:
    #    v[mask] = 1.0
    #    v[nmask] = np.exp(x[nmask])

    mask = x < 0.0

    if prime == 0:
        v = x.copy()
        v[mask] = np.exp(v[mask]) - 1.0

    elif prime == 1:
        v = np.ones_like(x)
        v[mask] = np.exp(x[mask])

    return v
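A sanity check of both branches against a direct np.where formulation (this is the exponential-linear shape: x for x >= 0, exp(x) - 1 below; assumes the corrected np.exp(x[mask]) derivative above):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(np.allclose(exprect(x), np.where(x < 0, np.exp(x) - 1.0, x)))        # True
print(np.allclose(exprect(x, prime=1), np.where(x < 0, np.exp(x), 1.0)))   # True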
Project: gtable    Author: guillemborrell    | project source | file source
def apply_mask_column(data, index, mask):
    """
    Sieve: apply a mask over data and return the filtered data array and index
    """
    new_data = data[mask]
    new_index = np.empty_like(index)

    data_cursor = 0
    for i, idx in enumerate(index):
        if idx:
            if mask[data_cursor]:
                new_index[i] = 1
            else:
                new_index[i] = 0
            data_cursor += 1
        else:
            new_index[i] = 0

    return new_data, new_index
Project: Tethys    Author: JosePedroMatos    | project source | file source
def predictiveQQ(simulations, targets, bands):
    with warnings.catch_warnings():
        # suppress interpolation warnings for the whole computation
        warnings.simplefilter("ignore")
        bands = toCustomLogSpace(np.array(bands)[::-1])
        pValues = np.empty_like(targets)
        for i0 in range(pValues.shape[0]):
            sims, idxs = np.unique(simulations[i0, :], return_index=True)
            try:
                pValues[i0] = interp1d(sims, bands[idxs], kind='linear', assume_sorted=True)(targets[i0])
            except np.linalg.linalg.LinAlgError as ex:
                pValues[i0] = np.nan
            except ValueError as ex:
                # TODO: handle better extrapolations
                if targets[i0] < sims[0]:
                    pValues[i0] = bands[0] + (bands[0]-bands[1])/(sims[0]-sims[1])*(targets[i0]-sims[0])
                else:
                    pValues[i0] = bands[-1] + (bands[-1]-bands[-2])/(sims[-1]-sims[-2])*(targets[i0]-sims[-1])
    pValues = fromCustomLogSpace(pValues)
    pValues[pValues < 0] = 0
    pValues[pValues > 1] = 1

    pValues = np.sort(1 - pValues[np.logical_not(np.isnan(pValues))])
    return (np.linspace(0, 1, pValues.shape[0]), pValues)
Project: mapnik-forest-change-tiles    Author: wri    | project source | file source
def remap_band1(layer_type, date_conf_array, lookup_dict):

    band1_array = np.copy(date_conf_array)

    # Remap the band1_array to reflect int( total_days / 255 )
    if layer_type == 'glad':

        # create an empty array for our offsets
        offset = np.empty_like(band1_array)

        # populate it with the proper offset value depending on band1_array values
        offset[np.logical_and(band1_array >= 20000, band1_array < 30000)] = 20000
        offset[np.logical_and(band1_array >= 30000, band1_array < 40000)] = 30000

        # subtract them to remove the confidence digit (ten thousandths place)
        # and divide by 255
        band1_array = (band1_array - offset) / 255

    else:
        for k, v in lookup_dict.iteritems():
            band1_array[band1_array == k] = v[0]

    return band1_array
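The GLAD values encode a confidence digit in the ten-thousands place on top of a cumulative day count, which the offset subtraction strips before scaling. A worked sketch with made-up values:

import numpy as np

band1_array = np.array([20510.0, 31020.0])    # hypothetical encoded values
offset = np.empty_like(band1_array)
offset[np.logical_and(band1_array >= 20000, band1_array < 30000)] = 20000
offset[np.logical_and(band1_array >= 30000, band1_array < 40000)] = 30000
print((band1_array - offset) / 255)           # [2. 4.], i.e. total_days / 255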
Project: epsilon_free_inference    Author: gpapamak    | project source | file source
def __mul__(self, other):
        """Multiply with a single gaussian."""

        assert isinstance(other, Gaussian)

        ys = [x * other for x in self.xs]

        lcs = np.empty_like(self.a)

        for i, (x, y) in enumerate(izip(self.xs, ys)):

            lcs[i] = x.logdetP + other.logdetP - y.logdetP
            lcs[i] -= np.dot(x.m, np.dot(x.P, x.m)) + np.dot(other.m, np.dot(other.P, other.m)) - np.dot(y.m, np.dot(y.P, y.m))
            lcs[i] *= 0.5

        la = np.log(self.a) + lcs
        la -= scipy.misc.logsumexp(la)
        a = np.exp(la)

        return MoG(a=a, xs=ys)
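The lcs loop follows from the product of two Gaussians in precision form: with $P_y = P_x + P_o$ and $P_y m_y = P_x m_x + P_o m_o$, the log normalization constant of the (unnormalized) product is

$$\log c = \tfrac{1}{2}\left(\log\det P_x + \log\det P_o - \log\det P_y\right) - \tfrac{1}{2}\left(m_x^\top P_x m_x + m_o^\top P_o m_o - m_y^\top P_y m_y\right),$$

up to $(2\pi)^{d/2}$ factors, which cancel when the mixture weights are renormalized through logsumexp.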
Project: epsilon_free_inference    Author: gpapamak    | project source | file source
def __div__(self, other):
        """Divide by a single gaussian."""

        assert isinstance(other, Gaussian)

        ys = [x / other for x in self.xs]

        lcs = np.empty_like(self.a)

        for i, (x, y) in enumerate(izip(self.xs, ys)):

            lcs[i] = x.logdetP - other.logdetP - y.logdetP
            lcs[i] -= np.dot(x.m, np.dot(x.P, x.m)) - np.dot(other.m, np.dot(other.P, other.m)) - np.dot(y.m, np.dot(y.P, y.m))
            lcs[i] *= 0.5

        la = np.log(self.a) + lcs
        la -= scipy.misc.logsumexp(la)
        a = np.exp(la)

        return MoG(a=a, xs=ys)
Project: imgProcessor    Author: radjkarl    | project source | file source
def standardDeviation2d(img, ksize=5, blurred=None):
    '''
    calculate the spatially resolved standard deviation
    for a given 2d array

    ksize   -> kernel size

    blurred (optional) -> image pre-filtered with a Gaussian of the same ksize;
                          passing it reduces processing time
    '''
    if not isinstance(ksize, (list, tuple)):
        ksize = (ksize, ksize)

    if blurred is None:
        blurred = gaussian_filter(img, ksize)
    else:
        assert blurred.shape == img.shape

    std = np.empty_like(img)

    _calc(img, ksize[0], ksize[1], blurred, std)

    return std
Project: Keras-GAN    Author: eriklindernoren    | project source | file source
def mask_randomly(self, imgs):
        y1 = np.random.randint(0, self.img_rows - self.mask_height, imgs.shape[0])
        y2 = y1 + self.mask_height
        x1 = np.random.randint(0, self.img_rows - self.mask_width, imgs.shape[0])
        x2 = x1 + self.mask_width

        masked_imgs = np.empty_like(imgs)
        missing_parts = np.empty((imgs.shape[0], self.mask_height, self.mask_width, self.channels))
        for i, img in enumerate(imgs):
            masked_img = img.copy()
            _y1, _y2, _x1, _x2 = y1[i], y2[i], x1[i], x2[i]
            missing_parts[i] = masked_img[_y1:_y2, _x1:_x2, :].copy()
            masked_img[_y1:_y2, _x1:_x2, :] = 0
            masked_imgs[i] = masked_img

        return masked_imgs, missing_parts, (y1, y2, x1, x2)
Project: maf    Author: gpapamak    | project source | file source
def __mul__(self, other):
        """Multiply with a single gaussian."""

        assert isinstance(other, Gaussian)

        ys = [x * other for x in self.xs]

        lcs = np.empty_like(self.a)

        for i, (x, y) in enumerate(izip(self.xs, ys)):

            lcs[i] = x.logdetP + other.logdetP - y.logdetP
            lcs[i] -= np.dot(x.m, np.dot(x.P, x.m)) + np.dot(other.m, np.dot(other.P, other.m)) - np.dot(y.m, np.dot(y.P, y.m))
            lcs[i] *= 0.5

        la = np.log(self.a) + lcs
        la -= scipy.misc.logsumexp(la)
        a = np.exp(la)

        return MoG(a=a, xs=ys)
Project: maf    Author: gpapamak    | project source | file source
def __div__(self, other):
        """Divide by a single gaussian."""

        assert isinstance(other, Gaussian)

        ys = [x / other for x in self.xs]

        lcs = np.empty_like(self.a)

        for i, (x, y) in enumerate(izip(self.xs, ys)):

            lcs[i] = x.logdetP - other.logdetP - y.logdetP
            lcs[i] -= np.dot(x.m, np.dot(x.P, x.m)) - np.dot(other.m, np.dot(other.P, other.m)) - np.dot(y.m, np.dot(y.P, y.m))
            lcs[i] *= 0.5

        la = np.log(self.a) + lcs
        la -= scipy.misc.logsumexp(la)
        a = np.exp(la)

        return MoG(a=a, xs=ys)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | project source | file source
def _apply_broadcast(self, func, axis):
        if axis == 0:
            target = self
        elif axis == 1:
            target = self.T
        else:  # pragma: no cover
            raise AssertionError('Axis must be 0 or 1, got %s' % axis)

        result_values = np.empty_like(target.values)
        columns = target.columns
        for i, col in enumerate(columns):
            result_values[:, i] = func(target[col])

        result = self._constructor(result_values, index=target.index,
                                   columns=target.columns)

        if axis == 1:
            result = result.T

        return result
Project: pyins    Author: nmayorov    | project source | file source
def _from_rotvec_array(rv):
    norm = np.linalg.norm(rv, axis=1)
    norm2 = norm ** 2
    norm4 = norm2 ** 2

    k1 = np.empty_like(norm2)
    k2 = np.empty_like(norm2)

    small = norm2 < 1e-6
    k1[small] = 1 - norm2[small] / 6 + norm4[small] / 120
    k2[small] = 0.5 - norm2[small] / 24 + norm4[small] / 720

    big = ~small
    k1[big] = np.sin(norm[big]) / norm[big]
    k2[big] = (1 - np.cos(norm[big])) / norm2[big]

    skew = _skew_matrix_array(rv)
    skew_squared = np.einsum('...ij,...jk->...ik', skew, skew)
    identity = np.empty_like(skew)
    identity[:] = np.identity(3)
    return (identity +
            k1[:, np.newaxis, np.newaxis] * skew +
            k2[:, np.newaxis, np.newaxis] * skew_squared)
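This is Rodrigues' formula in matrix form, with the small-norm branch replacing the coefficients by their Taylor series to avoid dividing by a near-zero norm:

$$R = I + \frac{\sin\|\phi\|}{\|\phi\|}\,[\phi]_\times + \frac{1-\cos\|\phi\|}{\|\phi\|^2}\,[\phi]_\times^2, \qquad \frac{\sin x}{x} \approx 1 - \frac{x^2}{6} + \frac{x^4}{120}, \quad \frac{1-\cos x}{x^2} \approx \frac{1}{2} - \frac{x^2}{24} + \frac{x^4}{720}.$$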
Project: pyins    Author: nmayorov    | project source | file source
def _dtheta_from_omega_matrix(theta):
    norm = np.linalg.norm(theta, axis=1)
    k = np.empty_like(norm)

    mask = norm > 1e-4
    nm = norm[mask]
    k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
    mask = ~mask
    nm = norm[mask]
    k[mask] = 1/12 + 1/720 * nm**2

    A = np.empty((norm.shape[0], 3, 3))
    skew = _skew_matrix_array(theta)
    A[:] = np.identity(3)
    A[:] += 0.5 * skew
    A[:] += k[:, None, None] * util.mm_prod(skew, skew)

    return A
Project: pyins    Author: nmayorov    | project source | file source
def _omega_from_dtheta_matrix(theta):
    norm = np.linalg.norm(theta, axis=1)
    k1 = np.empty_like(norm)
    k2 = np.empty_like(norm)

    mask = norm > 1e-4
    nm = norm[mask]
    k1[mask] = (1 - np.cos(nm)) / nm**2
    k2[mask] = (nm - np.sin(nm)) / nm**3

    mask = ~mask
    nm = norm[mask]
    k1[mask] = 0.5 - nm**2 / 24
    k2[mask] = 1/6 - nm**2 / 120

    A = np.empty((norm.shape[0], 3, 3))
    skew = _skew_matrix_array(theta)
    A[:] = np.identity(3)
    A[:] -= k1[:, None, None] * skew
    A[:] += k2[:, None, None] * util.mm_prod(skew, skew)

    return A
Project: pyins    Author: nmayorov    | project source | file source
def test_coning_sculling():
    # Basically a smoke test, because the function is quite simple.
    gyro = np.zeros((10, 3))
    gyro[:, 0] = 0.01
    gyro[:, 2] = -0.01

    accel = np.zeros((10, 3))
    accel[:, 2] = 0.1

    dv_true = np.empty_like(accel)
    dv_true[:, 0] = 0
    dv_true[:, 1] = -0.5e-3
    dv_true[:, 2] = 0.1
    theta, dv = coning_sculling(gyro, accel)
    assert_allclose(theta, gyro, rtol=1e-10)
    assert_allclose(dv, dv_true, rtol=1e-10)
Project: pyballd    Author: Yurlungur    | project source | file source
def residual(r,theta,u,d):
    out = np.empty_like(u)
    out[0] = (2*np.sin(theta)*r*d(u[0],1,0)
              + r*r*np.sin(theta)*d(u[0],2,0)
              + np.cos(theta)*d(u[0],0,1)
              + np.sin(theta)*d(u[1],0,2))
    out[1] = (2*np.sin(theta)*r*d(u[1],1,0)
              + r*r*np.sin(theta)*d(u[1],2,0)
              + np.cos(theta)*d(u[1],0,1)
              + np.sin(theta)*d(u[1],0,2))
    return out
Project: pyballd    Author: Yurlungur    | project source | file source
def DEFAULT_BDRY_THETA_MIN(r,u,d):
    ushape = u.shape
    num_vars = np.prod(ushape[:-2])
    uflat = u.reshape(tuple([num_vars])+ushape[-2:])
    out = np.empty_like(uflat)
    for i in range(out.shape[0]):
        out[i] = d(uflat[i],0,1)
    return out.reshape(ushape)
Project: pyballd    Author: Yurlungur    | project source | file source
def DEFAULT_BDRY_THETA_MAX(r,u,d):
    ushape = u.shape
    num_vars = np.prod(ushape[:-2])
    uflat = u.reshape(tuple([num_vars])+ushape[-2:])
    out = np.empty_like(uflat)
    for i in range(out.shape[0]):
        out[i] = d(uflat[i],0,1)
    return out.reshape(ushape)
Project: hip-mdp-public    Author: dtak    | project source | file source
def dsdt(t, s, params):
    """Wrapper for system derivative with respect to time"""
    derivs = np.empty_like(s)
    eps1,eps2,perturb_params,p_lambda1,p_lambda2,p_k1,p_k2,p_f,p_m1,p_m2,p_lambdaE,p_bE,p_Kb,p_d_E,p_Kd = params 
    dsdt_(derivs, s, t, eps1, eps2, perturb_params, p_lambda1, p_lambda2, p_k1, \
    p_k2, p_f, p_m1, p_m2, p_lambdaE, p_bE, p_Kb, p_d_E, p_Kd)
    return derivs
Project: hip-mdp-public    Author: dtak    | project source | file source
def dsdt(self, s_augmented, t):
        derivs = np.empty_like(s_augmented)
        self._dsdt(derivs, s_augmented, t)
        return derivs
Project: risk-slim    Author: ustunb    | project source | file source
def log_loss_value_from_scores(weights, total_weights, scores):
    """
    computes the logistic loss value from a vector of scores in a numerically stable way
    where scores = Z.dot(rho)

    see also: http://stackoverflow.com/questions/20085768/

    this function is used for heuristics (discrete_descent, sequential_rounding).
    to save computation when running the heuristics, we store the scores and
    call this function to compute the loss directly from the scores
    this reduces the need to recompute the dot product.

    Parameters
    ----------
    scores          numpy.array of scores = Z.dot(rho)
    total_weights   scalar = numpy.sum(weights) (only included to reduce computation)
    weights         numpy.array of sample weights with shape (n_rows,)

    Returns
    -------
    loss_value  scalar = 1/total_weights * sum(weights * log(1 + exp(-scores)))

    """
    pos_idx = scores > 0
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(np.exp(-scores[pos_idx]))
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(np.exp(scores[~pos_idx]))
    loss_value = loss_value.dot(weights) / total_weights

    return loss_value
Project: risk-slim    Author: ustunb    | project source | file source
def log_loss_value_and_slope(Z, rho):
    """
    computes the value and slope of the logistic loss in a numerically stable way
    this function should only be used when generating cuts in cutting-plane algorithms
    (computing both the value and the slope at the same time is slightly cheaper)

    see also: http://stackoverflow.com/questions/20085768/

    Parameters
    ----------
    Z           numpy.array containing training data with shape = (n_rows, n_cols)
    rho         numpy.array of coefficients with shape = (n_cols,)

    Returns
    -------
    loss_value  scalar = 1/n_rows * sum(log(1 + exp(-Z*rho)))
    loss_slope  numpy.array of shape (n_cols,) = 1/n_rows * Z.T.dot(1/(1 + exp(-Z*rho)) - 1)

    """
    scores = Z.dot(rho)
    pos_idx = scores > 0
    exp_scores_pos = np.exp(-scores[pos_idx])
    exp_scores_neg = np.exp(scores[~pos_idx])

    #compute loss value
    loss_value = np.empty_like(scores)
    loss_value[pos_idx] = np.log1p(exp_scores_pos)
    loss_value[~pos_idx] = -scores[~pos_idx] + np.log1p(exp_scores_neg)
    loss_value = loss_value.mean()

    #compute loss slope
    log_probs = np.empty_like(scores)
    log_probs[pos_idx]  = 1.0 / (1.0 + exp_scores_pos)
    log_probs[~pos_idx] = exp_scores_neg / (1.0 + exp_scores_neg)
    loss_slope = Z.T.dot(log_probs - 1.0) / Z.shape[0]

    return loss_value, loss_slope
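The slope line is the gradient of the mean loss: with $\sigma(s) = 1/(1+e^{-s})$,

$$\nabla_\rho \, \frac{1}{n}\sum_i \log\left(1 + e^{-z_i^\top \rho}\right) = \frac{1}{n}\sum_i z_i\left(\sigma(z_i^\top \rho) - 1\right) = \frac{1}{n}\, Z^\top\left(\sigma(Z\rho) - \mathbf{1}\right),$$

which is exactly Z.T.dot(log_probs - 1.0) / Z.shape[0], since the (misleadingly named) log_probs array holds $\sigma(Z\rho)$.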
Project: dc_stat_think    Author: justinbois    | project source | file source
def _ecdf_formal(x, data):
    """
    Compute the values of the formal ECDF generated from `data` at x.
    I.e., if F is the ECDF, return F(x).

    Parameters
    ----------
    x : array_like
        Positions at which the formal ECDF is to be evaluated.
    data : array_like
        *Sorted* data set to use to generate the ECDF.

    Returns
    -------
    output : float or ndarray
        Value of the ECDF at `x`.
    """
    output = np.empty_like(x)

    for i, x_val in enumerate(x):
        j = 0
        while j < len(data) and x_val >= data[j]:
            j += 1

        output[i] = j

    return output / len(data)
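Since data is sorted, the counting loop is equivalent to one vectorized call; a sketch using np.searchsorted:

import numpy as np

data = np.sort(np.random.rand(100))
x = np.linspace(-0.2, 1.2, 7)
# index of the first element greater than x_val == number of points <= x_val
ecdf = np.searchsorted(data, x, side='right') / len(data)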