Python scipy.optimize module: approx_fprime() example source code

The following 11 code examples, extracted from open source Python projects, illustrate how to use scipy.optimize.approx_fprime().
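Before diving into the project examples, here is a minimal, self-contained sketch of the call itself. approx_fprime(xk, f, epsilon, *args) returns a forward-difference approximation of the gradient of a scalar-valued function f at the point xk. The function and test point below are made up for illustration and do not come from any of the projects listed.

import numpy as np
from scipy.optimize import approx_fprime

# Illustrative scalar function with known analytic gradient [2*x0, 3].
def f(x):
    return x[0] ** 2 + 3.0 * x[1]

x0 = np.array([1.0, 2.0])
num_grad = approx_fprime(x0, f, 1e-8)  # forward-difference gradient at x0
print(num_grad)                        # approximately [2. 3.]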

Project: soft-dtw    Author: mblondel    | Project source | File source
def test_soft_dtw_grad_X():
    def make_func(gamma):
        def func(x):
            X_ = x.reshape(*X.shape)
            D_ = SquaredEuclidean(X_, Y)
            return SoftDTW(D_, gamma).compute()
        return func

    for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
        dist = SquaredEuclidean(X, Y)
        sdtw = SoftDTW(dist, gamma)
        sdtw.compute()
        E = sdtw.grad()
        G = dist.jacobian_product(E)

        func = make_func(gamma)
        G_num = approx_fprime(X.ravel(), func, 1e-6).reshape(*G.shape)
        assert_array_almost_equal(G, G_num, 5)
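Note the ravel/reshape pattern above: approx_fprime differentiates only with respect to a 1-D array, so the matrix X is flattened before the call and the numerical gradient is reshaped back afterwards. A minimal sketch of the same pattern with a made-up matrix objective (the squared Frobenius norm) instead of SoftDTW:

import numpy as np
from scipy.optimize import approx_fprime

# Made-up scalar objective of a matrix argument: the squared Frobenius norm.
def frob_sq(A):
    return np.sum(A ** 2)

A = np.arange(6, dtype=float).reshape(2, 3)

# Flatten for approx_fprime, then reshape the gradient back to the matrix shape.
func = lambda a: frob_sq(a.reshape(A.shape))
G_num = approx_fprime(A.ravel(), func, 1e-6).reshape(A.shape)

assert np.allclose(G_num, 2 * A, atol=1e-4)  # analytic gradient is 2*A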
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def test_std_gradient():
    length_scale = np.arange(1, 6)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    X_new = rng.randn(5)

    rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)

    _, _, _, std_grad = gpr.predict(
        np.expand_dims(X_new, axis=0),
        return_std=True, return_cov=False, return_mean_grad=True,
        return_std_grad=True)
    num_grad = optimize.approx_fprime(
        X_new, lambda x: predict_wrapper(x, gpr)[1], 1e-4)
    assert_array_almost_equal(std_grad, num_grad, decimal=3)
Project: PorousMediaLab    Author: biogeochemistry    | Project source | File source
def _jac(self, x, *args):
        return spo.approx_fprime(x, self._diff_pos_neg, 1e-2, *args)
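Everything passed to approx_fprime after the step size is forwarded unchanged to the objective; that is how self._diff_pos_neg receives *args above. A small sketch of this calling convention with a hypothetical two-parameter objective:

import numpy as np
from scipy.optimize import approx_fprime

# Hypothetical objective taking extra positional arguments: f(x; a, b) = a * sum(x**2) + b.
def f(x, a, b):
    return a * np.sum(x ** 2) + b

x0 = np.array([1.0, -2.0])
# Arguments after the step size are passed straight through to f (here a=3.0, b=1.0).
grad = approx_fprime(x0, f, 1e-8, 3.0, 1.0)
print(grad)  # approximately 2 * a * x0 = [ 6. -12.]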
Project: pyglmnet    Author: glm-tools    | Project source | File source
def test_gradients():
    """Test gradient accuracy."""
    # data
    scaler = StandardScaler()
    n_samples, n_features = 1000, 100
    X = np.random.normal(0.0, 1.0, [n_samples, n_features])
    X = scaler.fit_transform(X)

    density = 0.1
    beta_ = np.zeros(n_features + 1)
    beta_[0] = np.random.rand()
    beta_[1:] = sps.rand(n_features, 1, density=density).toarray()[:, 0]

    reg_lambda = 0.1
    distrs = ['gaussian', 'binomial', 'softplus', 'poisson', 'probit', 'gamma']
    for distr in distrs:
        glm = GLM(distr=distr, reg_lambda=reg_lambda)
        y = simulate_glm(glm.distr, beta_[0], beta_[1:], X)

        func = partial(_L2loss, distr, glm.alpha,
                       glm.Tau, reg_lambda, X, y, glm.eta, glm.group)
        grad = partial(_grad_L2loss, distr, glm.alpha, glm.Tau,
                       reg_lambda, X, y,
                       glm.eta)
        approx_grad = approx_fprime(beta_, func, 1.5e-8)
        analytical_grad = grad(beta_)
        assert_allclose(approx_grad, analytical_grad, rtol=1e-5, atol=1e-3)
Project: soft-dtw    Author: mblondel    | Project source | File source
def test_soft_dtw_grad():
    def make_func(gamma):
        def func(d):
            D_ = d.reshape(*D.shape)
            return SoftDTW(D_, gamma).compute()
        return func

    for gamma in (0.001, 0.01, 0.1, 1, 10, 100, 1000):
        sdtw = SoftDTW(D, gamma)
        sdtw.compute()
        E = sdtw.grad()
        func = make_func(gamma)
        E_num = approx_fprime(D.ravel(), func, 1e-6).reshape(*E.shape)
        assert_array_almost_equal(E, E_num, 5)
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def check_gradient_correctness(X_new, model, acq_func, y_opt):
    analytic_grad = gaussian_acquisition_1D(
        X_new, model, y_opt, acq_func)[1]
    num_grad_func = lambda x: gaussian_acquisition_1D(
        x, model, y_opt, acq_func=acq_func)[0]
    num_grad = optimize.approx_fprime(X_new, num_grad_func, 1e-5)
    assert_array_almost_equal(analytic_grad, num_grad, 3)
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def numerical_gradient(X, Y, kernel, step_size=1e-4):
    grad = []
    for y in Y:
        num_grad = optimize.approx_fprime(X, kernel_X_Y, step_size, y, kernel)
        grad.append(num_grad)
    return np.asarray(grad)
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def test_mean_gradient():
    length_scale = np.arange(1, 6)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    X_new = rng.randn(5)

    rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)

    mean, std, mean_grad = gpr.predict(
        np.expand_dims(X_new, axis=0),
        return_std=True, return_cov=False, return_mean_grad=True)
    num_grad = optimize.approx_fprime(
        X_new, lambda x: predict_wrapper(x, gpr)[0], 1e-4)
    assert_array_almost_equal(mean_grad, num_grad, decimal=3)
Project: Parallel-SGD    Author: angadgill    | Project source | File source
def test_logistic_loss_and_grad():
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]

    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
            )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
            )
        assert_array_almost_equal(loss, loss_interp)

        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
            )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
Project: bnpy    Author: bnpy    | Project source | File source
def testGradientExactAndApproxAgree__objFunc_constrained(self):
        ''' Verify the computed gradient is similar for exact and approx methods.
        '''
        print('')
        for K in [1, 10, 107]:
            for alpha in [0.1, 0.95]:
                for gamma in [1., 3.14, 9.45]:
                    for seed in [111, 222, 333]:
                        PRNG = np.random.RandomState(seed)
                        rho = PRNG.rand(K)
                        omega = 100 * PRNG.rand(K)
                        rhoomega = np.hstack([rho, omega])
                        kwargs = dict(alpha=alpha,
                                      gamma=gamma,
                                      nDoc=0,
                                      sumLogPi=np.zeros(K + 1))

                        # Exact gradient
                        _, g = OptimizerRhoOmega.objFunc_constrained(
                            rhoomega, approx_grad=0, **kwargs)
                        # Numerical gradient
                        objFunc_cons = OptimizerRhoOmega.objFunc_constrained
                        objFunc = lambda x: objFunc_cons(
                            x, approx_grad=1, **kwargs)
                        epsvec = np.hstack(
                            [1e-8 * np.ones(K), 1e-8 * np.ones(K)])
                        gapprox = approx_fprime(rhoomega, objFunc, epsvec)

                        print('      rho 1:10 ', np2flatstr(rho))
                        print('     grad 1:10 ', np2flatstr(g[:K], fmt='% .6e'))
                        print('  gapprox 1:10 ', np2flatstr(
                            gapprox[:K], fmt='% .6e'))
                        if K > 10:
                            print('    rho K-10:K ', np2flatstr(rho[-10:]))
                            print('   grad K-10:K ', np2flatstr(
                                g[K - 10:K], fmt='% .6e'))
                            print('gapprox K-10:K ', np2flatstr(
                                gapprox[K - 10:K], fmt='% .6e'))
                        assert np.allclose(g[:K],
                                           gapprox[:K],
                                           atol=1e-6,
                                           rtol=0.01)

                        print(np2flatstr(g[K:]))
                        print(np2flatstr(gapprox[K:]))
                        assert np.allclose(g[K:],
                                           gapprox[K:],
                                           atol=1e-4,
                                           rtol=0.05)
Project: bnpy    Author: bnpy    | Project source | File source
def testGradientExactAndApproxAgree__objFunc_constrained(self,
                                                             hmmKappa=100):
        ''' Verify the computed gradient is similar for exact and approx methods.
        '''
        print('')
        for K in [1, 2, 10]:
            for gamma in [1.0, 2.0, 6.28]:
                for alpha in [0.1, 0.9, 1.5]:
                    for seed in [333, 777, 888]:

                        PRNG = np.random.RandomState(seed)
                        u = np.linspace(0.1, 0.9, K)
                        Vd = sampleVd(u, K + 1, alpha, PRNG=PRNG)
                        sumLogPi = summarizeVdToPi(Vd)

                        # Randomly initialize rho and omega
                        rho = PRNG.rand(K)
                        omega = K * PRNG.rand(K)
                        rhoomega = np.hstack([rho, omega])

                        kwargs = dict(alpha=alpha,
                                      gamma=1,
                                      nDoc=K + 1,
                                      kappa=hmmKappa,
                                      sumLogPi=sumLogPi)

                        # Exact gradient
                        f, g = OptimizerRhoOmega.objFunc_constrained(
                            rhoomega, approx_grad=0, **kwargs)

                        # Approx gradient
                        oFunc_cons = OptimizerRhoOmega.objFunc_constrained

                        def objFunc(x):
                            return oFunc_cons(x, approx_grad=1, **kwargs)
                        epsvec = np.hstack(
                            [1e-8 * np.ones(K), 1e-8 * np.ones(K)])
                        gapprox = approx_fprime(rhoomega, objFunc, epsvec)

                        print(np2flatstr(g))
                        print(np2flatstr(gapprox))
                        print('')
                        assert np.allclose(g, gapprox, atol=0, rtol=0.001)
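As the epsvec argument in the two bnpy tests shows, the step size passed to approx_fprime need not be a scalar: an array gives a separate finite-difference step per coordinate, which is useful when coordinates live on very different scales. A minimal sketch with a made-up objective:

import numpy as np
from scipy.optimize import approx_fprime

# Made-up objective whose two coordinates have very different curvatures.
def f(x):
    return x[0] ** 2 + 1e6 * x[1] ** 2

x0 = np.array([1.0, 1e-3])
eps = np.array([1e-6, 1e-9])  # one finite-difference step per coordinate
grad = approx_fprime(x0, f, eps)
print(grad)  # approximately [2., 2000.]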