Python scipy module: optimize() code examples

We extracted the following 49 code examples from open-source Python projects to illustrate how the scipy.optimize module is used.
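
For orientation, the sketch below (not taken from any project on this page) shows the module's general-purpose entry point, scipy.optimize.minimize, on the built-in Rosenbrock test function; the starting point is arbitrary.

import numpy as np
import scipy.optimize

# Minimize the Rosenbrock function with an analytic gradient.
result = scipy.optimize.minimize(scipy.optimize.rosen, x0=np.array([1.3, 0.7]),
                                 method='BFGS', jac=scipy.optimize.rosen_der)
print(result.x)        # close to [1., 1.], the global minimum
print(result.success)  # True when the optimizer converged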

Project: KDDCUP2016    Author: hugochan
def init(self, init_points, return_log):
    '''A function to perform all initialization and clear the optimize methods - To be constructed'''

    if self.randomstate is not None:
        numpy.random.seed(self.randomstate)

    print('Optimization procedure is initializing at %i random points.' % init_points)

    # Sample some points at random to define xtrain.
    xtrain = numpy.asarray([numpy.random.uniform(x[0], x[1], size=init_points) for x in self.log_bounds]).T
    ytrain = []
    for x in xtrain:
        ytrain.append(self.f(dict(zip(self.keys, return_log(x)))))
        print('%d points initialized.' % len(ytrain))

    ytrain = numpy.asarray(ytrain)

    print('Optimization procedure is done initializing.')

    return xtrain, ytrain

        # ----------------------- // ----------------------- # ----------------------- // ----------------------- #
Project: third_person_im    Author: bstadie
def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """

        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )
Project: third_person_im    Author: bstadie
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            return f_opt(*inputs)

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
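
Outside the rllab wrapper, the scipy.optimize.fmin_l_bfgs_b pattern above reduces to the following minimal sketch; the quadratic objective and the printing callback are illustrative assumptions. With approx_grad left at its default, func must return the pair (loss, gradient).

import numpy as np
import scipy.optimize

def loss_and_grad(x):
    # fmin_l_bfgs_b expects (loss, gradient) from func unless approx_grad is set.
    loss = np.sum((x - 3.0) ** 2)
    grad = 2.0 * (x - 3.0)
    return loss, grad

def opt_callback(xk):
    print('current params:', xk)  # called once per iteration

x_opt, f_opt, info = scipy.optimize.fmin_l_bfgs_b(
    func=loss_and_grad, x0=np.zeros(5), maxiter=10, callback=opt_callback)
print(x_opt)  # close to all 3.0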
Project: third_person_im    Author: bstadie
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )
Project: third_person_im    Author: bstadie
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: The optimizer to use (a callable). It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
Project: third_person_im    Author: bstadie
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
Project: rllabplusplus    Author: shaneshixiang
def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """

        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )
Project: rllabplusplus    Author: shaneshixiang
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            return f_opt(*inputs)

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
Project: rllabplusplus    Author: shaneshixiang
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )
Project: rllabplusplus    Author: shaneshixiang
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: The optimizer to use (a callable). It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
Project: rllabplusplus    Author: shaneshixiang
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
Project: cuicuilco    Author: AlbertoEsc
def non_linear_inverse(self, y, verbose=False):
        """Non-linear inverse approximation method. """
        x_lin = self.linear_inverse(y)
        rmse_lin = ((y - self.execute(x_lin)) ** 2).sum(axis=1).mean() ** 0.5
        # scipy.optimize.leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-08,
        # xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None)
        x_nl = numpy.zeros_like(x_lin)
        y_dim = y.shape[1]
        x_dim = x_lin.shape[1]
        if y_dim < x_dim:
            num_zeros_filling = x_dim - y_dim
        else:
            num_zeros_filling = 0
        if verbose:
            print("x_dim=", x_dim, "y_dim=", y_dim, "num_zeros_filling=", num_zeros_filling)
        y_long = numpy.zeros(y_dim + num_zeros_filling)

        for i, y_i in enumerate(y):
            y_long[0:y_dim] = y_i
            if verbose:
                print("x_0=", x_lin[i])
                print("y_long=", y_long)
            plsq = scipy.optimize.leastsq(func=f_residual, x0=x_lin[i], args=(self, y_long), full_output=False)
            x_nl_i = plsq[0]
            if verbose:
                print("x_nl_i=", x_nl_i, "plsq[1]=", plsq[1])
            if plsq[1] != 2:
                print("Quitting: plsq[1]=", plsq[1])
                # quit()
            x_nl[i] = x_nl_i
            print("|E_lin(%d)|=" % i, ((y_i - self.execute(x_lin[i].reshape((1, -1)))) ** 2).sum() ** 0.5)
            print("|E_nl(%d)|=" % i, ((y_i - self.execute(x_nl_i.reshape((1, -1)))) ** 2).sum() ** 0.5)
        rmse_nl = ((y - self.execute(x_nl)) ** 2).sum(axis=1).mean() ** 0.5
        print("rmse_lin(all samples)=", rmse_lin, "rmse_nl(all samples)=", rmse_nl)
        return x_nl
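
A standalone scipy.optimize.leastsq sketch, fitting a line to synthetic data (the model and data are assumptions). Note that leastsq returns an integer status ier whose values 1-4 all mean a solution was found, which is why the plsq[1] != 2 check above can also fire on successful fits.

import numpy as np
import scipy.optimize

def residuals(params, x, y):
    a, b = params
    return y - (a * x + b)  # leastsq minimizes the sum of squared residuals

x = np.linspace(0., 1., 50)
y = 2.0 * x + 1.0 + 0.05 * np.random.normal(size=x.size)

params, ier = scipy.optimize.leastsq(residuals, x0=[0.0, 0.0], args=(x, y))
print(params)               # close to [2.0, 1.0]
print(ier in (1, 2, 3, 4))  # True when a solution was found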
Project: cuicuilco    Author: AlbertoEsc
def invert_exp_funcs2(exp_x_noisy, dim_x, exp_funcs, distance=sfa_libs.distance_best_squared_Euclidean,
                      use_hint=False, max_steady_factor=5, delta_factor=0.7, min_delta=0.0001, k=0.5, verbose=False):
    """ Function that approximates a preimage of exp_x_noisy notice 
    that distance, max_steady_factor, delta, min_delta are deprecated and useless
    """
    num_samples = exp_x_noisy.shape[0]

    if isinstance(use_hint, numpy.ndarray):
        if verbose:
            print("Using suggested approximation!")
        app_x = use_hint.copy()
    elif use_hint:
        if verbose:
            print("Using lowest dim_x=%d elements of input for first approximation!" % (dim_x))
        app_x = exp_x_noisy[:, 0:dim_x].copy()
    else:
        app_x = numpy.random.normal(size=(num_samples, dim_x))

    for row in range(num_samples):
        # app_x_row = app_x[row].reshape(1, dim_x)
        # exp_x_noisy_row = exp_x_noisy[row].reshape(1, dim_exp_x)
        # app_exp_x_row = app_exp_x[row].reshape(1, dim_exp_x)
        # Definition:       scipy.optimize.leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0,
        #                                         ftol=1.49012e-08, xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=0.0,
        #                                         factor=100, diag=None, warning=True)
        plsq = scipy.optimize.leastsq(residuals, app_x[row], args=(exp_x_noisy[row], exp_funcs, app_x[row], k),
                                      ftol=1.49012e-06, xtol=1.49012e-06, gtol=0.0, maxfev=50*dim_x, epsfcn=0.0,
                                      factor=1.0)
        app_x[row] = plsq[0]

    app_exp_x = sfa_libs.apply_funcs_to_signal(exp_funcs, app_x)
    return app_x, app_exp_x
Project: cuicuilco    Author: AlbertoEsc
def non_linear_inverse(self, y, verbose=False):
        x_lin = self.linear_inverse(y)
        rmse_lin = ((y - self.execute(x_lin)) ** 2).sum(axis=1).mean() ** 0.5
        #        scipy.optimize.leastsq(func, x0, args=(), Dfun=None, full_output=0, col_deriv=0, ftol=1.49012e-08,
        # xtol=1.49012e-08, gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None)
        x_nl = numpy.zeros_like(x_lin)
        y_dim = y.shape[1]
        x_dim = x_lin.shape[1]
        if y_dim < x_dim:
            num_zeros_filling = x_dim - y_dim
        else:
            num_zeros_filling = 0
        if verbose:
            print("x_dim=", x_dim, "y_dim=", y_dim, "num_zeros_filling=", num_zeros_filling)
        y_long = numpy.zeros(y_dim + num_zeros_filling)

        for i, y_i in enumerate(y):
            y_long[0:y_dim] = y_i
            if verbose:
                print("x_0=", x_lin[i])
                print("y_long=", y_long)
            plsq = scipy.optimize.leastsq(func=f_residual, x0=x_lin[i], args=(self, y_long), full_output=False)
            x_nl_i = plsq[0]
            if verbose:
                print("x_nl_i=", x_nl_i, "plsq[1]=", plsq[1])
            if plsq[1] != 2:
                print("Quitting: plsq[1]=", plsq[1])
                # quit()
            x_nl[i] = x_nl_i
            print("|E_lin(%d)|=" % i, ((y_i - self.execute(x_lin[i].reshape((1, -1)))) ** 2).sum() ** 0.5)
            print("|E_nl(%d)|=" % i, ((y_i - self.execute(x_nl_i.reshape((1, -1)))) ** 2).sum() ** 0.5)
        rmse_nl = ((y - self.execute(x_nl)) ** 2).sum(axis=1).mean() ** 0.5
        print("rmse_lin(all samples)=", rmse_lin, "rmse_nl(all samples)=", rmse_nl)
        return x_nl
Project: fitdadi    Author: LohmuellerLab
def optimize_log(p0, data, model_func, sel_dist, theta, lower_bound=None, 
                 upper_bound=None, verbose=0, flush_delay=0.5, epsilon=1e-3, 
                 gtol=1e-5, multinom=False, maxiter=None, full_output=False,
                 func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,
                 output_file=None):

    if output_file:
        output_stream = open(output_file, 'w')
    else:
        output_stream = sys.stdout

    args = (data, model_func, sel_dist, theta, lower_bound, upper_bound,
            verbose, multinom, flush_delay, func_args, func_kwargs,
            fixed_params, ll_scale, output_stream)

    p0 = Inference._project_params_down(p0, fixed_params)
    outputs = scipy.optimize.fmin_bfgs(_object_func_log, 
                                       numpy.log(p0), epsilon=epsilon,
                                       args = args, gtol=gtol, 
                                       full_output=True,
                                       disp=False,
                                       maxiter=maxiter)
    xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs
    xopt = Inference._project_params_up(numpy.exp(xopt), fixed_params)

    if output_file:
        output_stream.close()

    if not full_output:
        return [-fopt, xopt]
    else:
        return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag
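
Stripped of the dadi bookkeeping, the underlying call is plain scipy.optimize.fmin_bfgs; a minimal sketch with an assumed quadratic objective follows. The log-parameter trick above (optimizing numpy.log(p0) and exponentiating the result) keeps strictly positive parameters positive during the unconstrained search.

import numpy as np
import scipy.optimize

def objective(p):
    return np.sum((p - 1.0) ** 2)

outputs = scipy.optimize.fmin_bfgs(objective, x0=np.zeros(3), gtol=1e-5,
                                   full_output=True, disp=False, maxiter=100)
xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs
print(xopt)  # close to [1., 1., 1.]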
Project: fitdadi    Author: LohmuellerLab
def optimize(p0, data, model_func, sel_dist, theta, lower_bound=None,
             upper_bound=None, verbose=0, flush_delay=0.5, epsilon=1e-3, 
             gtol=1e-5, multinom=False, maxiter=None, full_output=False,
             func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,
             output_file=None):
    """
    optimizer for use with distributions where log transformations do not work,
    e.g. when gamma is positive and negative
    """
    if output_file:
        output_stream = open(output_file, 'w')
    else:
        output_stream = sys.stdout

    args = (data, model_func, sel_dist, theta, lower_bound, upper_bound, 
            verbose, multinom, flush_delay, func_args, func_kwargs,
            fixed_params, ll_scale, output_stream)

    p0 = _project_params_down(p0, fixed_params)
    outputs = scipy.optimize.fmin_bfgs(_object_func, p0, 
                                       epsilon=epsilon,
                                       args = args, gtol=gtol, 
                                       full_output=True,
                                       disp=False,
                                       maxiter=maxiter)
    xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs
    xopt = Inference._project_params_up(xopt, fixed_params)

    if output_file:
        output_stream.close()

    if not full_output:
        return xopt
    else:
        return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag

##end of dadi.Inference code
Project: fitdadi    Author: LohmuellerLab
def optimize_log(p0, data, model_func, sel_dist, theta, lower_bound=None, 
                 upper_bound=None, verbose=0, flush_delay=0.5, epsilon=1e-3, 
                 gtol=1e-5, multinom=False, maxiter=None, full_output=False,
                 func_args=[], func_kwargs={}, fixed_params=None, ll_scale=1,
                 output_file=None):

    if output_file:
        output_stream = open(output_file, 'w')
    else:
        output_stream = sys.stdout

    args = (data, model_func, sel_dist, theta, lower_bound, upper_bound,
            verbose, multinom, flush_delay, func_args, func_kwargs,
            fixed_params, ll_scale, output_stream)

    p0 = Inference._project_params_down(p0, fixed_params)
    outputs = scipy.optimize.fmin_bfgs(_object_func_log, 
                                       numpy.log(p0), epsilon=epsilon,
                                       args = args, gtol=gtol, 
                                       full_output=True,
                                       disp=False,
                                       maxiter=maxiter)
    xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag = outputs
    xopt = Inference._project_params_up(numpy.exp(xopt), fixed_params)

    if output_file:
        output_stream.close()

    if not full_output:
        return [-fopt, xopt]
    else:
        return xopt, fopt, gopt, Bopt, func_calls, grad_calls, warnflag
Project: yt    Author: yt-project
def optimize(self):
        if self._optimize is None:
            try:
                import scipy.optimize as optimize
            except ImportError:
                optimize = NotAModule(self._name)
            self._optimize = optimize
        return self._optimize
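
A standalone sketch of the same deferred-import pattern, without yt's NotAModule placeholder (the class name here is an assumption): scipy.optimize is only imported the first time the attribute is touched.

class SciPyDeps(object):
    """Defer the scipy.optimize import until first attribute access."""

    def __init__(self):
        self._optimize = None

    @property
    def optimize(self):
        if self._optimize is None:
            import scipy.optimize as optimize  # pay the import cost once, on demand
            self._optimize = optimize
        return self._optimize

deps = SciPyDeps()
root = deps.optimize.brentq(lambda x: x ** 2 - 2, 0, 2)
print(root)  # close to 1.414 (sqrt(2))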
Project: cas-eval    Author: varepsilon
def train(self, data):
        reg_weight = self.regularization_weight()

        def f(theta):
            ll = 0
            for d in data:
                session = d['session']
                if DEBUG:
                    assert len(session) > 5
                    assert len(session) < 15
                ll += self.log_likelihood(theta, session, d['serp'], d['sat'], f_only=True).full
            N = len(data)
            reg_term = 0.5 * self.reg_coeff / N * np.multiply(reg_weight, theta).dot(theta)
            if DEBUG:
                self.debug_theta(theta)
                print('mean LL = %f, reg_term = %f, N = %d' % (ll/N, reg_term, N))
            return -ll / N + reg_term

        def fprime(theta):
            ll_prime = np.zeros(self.num_features)
            for d in data:
                ll_prime += self.log_likelihood(theta, d['session'], d['serp'], d['sat']).gaussian
            N = len(data)
            return -ll_prime / N + self.reg_coeff / N * np.multiply(reg_weight, theta)

        theta0 = self.initial_guess()
        opt_res = scipy.optimize.minimize(f, theta0, method='L-BFGS-B', jac=fprime, options=dict(maxiter=100))
        return opt_res.x
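
Condensed, the training step above is scipy.optimize.minimize with method='L-BFGS-B' and an analytic gradient; in this sketch an assumed quadratic stands in for the regularized negative log-likelihood.

import numpy as np
import scipy.optimize

def f(theta):
    return np.sum(theta ** 2) + 1.0

def fprime(theta):
    return 2.0 * theta  # gradient of f, passed via jac=

theta0 = np.ones(4)
opt_res = scipy.optimize.minimize(f, theta0, method='L-BFGS-B', jac=fprime,
                                  options=dict(maxiter=100))
print(opt_res.x)    # close to zeros
print(opt_res.fun)  # close to 1.0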
Project: gail-driver    Author: sisl
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(
                tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(
                inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
Project: gail-driver    Author: sisl
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            ret = f_opt(*inputs)
            return ret

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(
                trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
Project: gail-driver    Author: sisl
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: The optimizer to use (a callable). It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
Project: pyaeroopt    Author: mjzahr
def optimize(self, solver, sens='finite-diff', options=None, callback=None):

        if 'pyopt' in solver:
            xStar, fStar = self.optimizePyopt(solver, sens, options)
        elif 'scipy' in solver:
            xStar, fStar = self.optimizeScipy(solver, sens, options, callback)
        elif 'nlopt' in solver:
            xStar, fStar = self.optimizeNlopt(solver, sens, options)
        elif 'openopt' in solver:
            xStar, fStar = self.optimizeOpenopt(solver, sens, options)
        else:
            raise ValueError('Unknown solver: %s' % solver)

        return (xStar, fStar)

    #TODO: test
Project: WaveletQuotes    Author: JobyKK
def find_s0(self):
        """Find the smallest resolvable scale by finding where the
        equivalent Fourier period is equal to 2 * dt. For a Morlet
        wavelet, this is roughly 1.
        """
        dt = self.dt

        def f(s):
            return self.fourier_period(s) - 2 * dt
        return scipy.optimize.fsolve(f, 1)[0]
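
In isolation, the root-finding step is a one-dimensional scipy.optimize.fsolve call; the sketch below assumes the standard Morlet scale-to-period relation (Torrence & Compo 1998) with frequency parameter w0 = 6, since fourier_period itself is defined elsewhere in the class.

import numpy as np
import scipy.optimize

dt = 1.0
w0 = 6.0  # assumed Morlet frequency parameter

def fourier_period(s):
    # Standard Morlet conversion from scale to equivalent Fourier period.
    return 4 * np.pi * s / (w0 + np.sqrt(2 + w0 ** 2))

def f(s):
    return fourier_period(s) - 2 * dt

s0 = scipy.optimize.fsolve(f, 1)[0]
print(s0)  # about 1.94 for dt = 1.0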
Project: WaveletQuotes    Author: JobyKK
def find_s0(self):
        """Find the smallest resolvable scale by finding where the
        equivalent Fourier period is equal to 2 * dt. For a Morlet
        wavelet, this is roughly 1.
        """
        dt = self.dt

        def f(s):
            return self.fourier_period(s) - 2 * dt
        return scipy.optimize.fsolve(f, 1)[0]
Project: LambdaMart    Author: lezzago
def find_splits_parallel(args):
    var_space, label, col = args
    # var_space = data.iloc[:,col].tolist()
    return scipy.optimize.fminbound(error_function, min(var_space), max(var_space), args = (col, var_space, label), full_output = 1)
    # return, 
    # if not min_error or error < min_error:
    #   min_error = error
    #   split_var = col
    #   min_split = split
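
A self-contained scipy.optimize.fminbound sketch (the toy error curve is an assumption); with full_output=1 it returns the minimizer, the function value there, an error flag, and the number of function evaluations, matching the tuple returned above.

import scipy.optimize

def error_function(split):
    # Toy one-dimensional error curve with its minimum at split = 2.5.
    return (split - 2.5) ** 2

xopt, fval, ierr, numfunc = scipy.optimize.fminbound(
    error_function, 0.0, 10.0, full_output=1)
print(xopt, fval)  # close to 2.5, 0.0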
Project: rllab    Author: rll
def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """

        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )
Project: rllab    Author: rll
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            return f_opt(*inputs)

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
Project: rllab    Author: rll
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )
Project: rllab    Author: rll
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: The optimizer to use (a callable). It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
Project: rllab    Author: rll
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
Project: GRIPy    Author: giruenf
def find_s0(self):
        """Find the smallest resolvable scale by finding where the
        equivalent Fourier period is equal to 2 * dt. For a Morlet
        wavelet, this is roughly 1.
        """
        dt = self.dt

        def f(s):
            return self.fourier_period(s) - 2 * dt
        return scipy.optimize.fsolve(f, 1)[0]
Project: maml_rl    Author: cbfinn
def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """

        self._target = target

        def get_opt_output(gradients):
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )
Project: maml_rl    Author: cbfinn
def optimize(self, inputs, extra_inputs=None):
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            return f_opt(*inputs)

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
Project: maml_rl    Author: cbfinn
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )
Project: maml_rl    Author: cbfinn
def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """

        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: The optimizer to use (a callable). It must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None
Project: maml_rl    Author: cbfinn
def update_opt(self, loss, target, inputs, extra_inputs=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """

        self._target = target

        def get_opt_output():
            flat_grad = tensor_utils.flatten_tensor_variables(tf.gradients(loss, target.get_params(trainable=True)))
            return [tf.cast(loss, tf.float64), tf.cast(flat_grad, tf.float64)]

        if extra_inputs is None:
            extra_inputs = list()

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(),
            )
        )
Project: TDOSE    Author: kasperschmidt
def optimize_source_scale_gauss(img_data,img_std,mu_objs,cov_objs,optimizer='curve_fit',verbose=True):
    """
    optimize the (flux) scaling of an image by scaling each individual source (assumed to be a
    multi-variate Gaussian with mu and covariance) with respect to a (noisy) data image

    --- INPUT ---
    img_data        The (noisy) data image to scale model image provide in img_model to
    img_std         Standard deviation image for data to use in optimization
    mu_objs         Mean vectors for multivariate Gaussian sources to scale         Dimensions: [Nobj,2]
    cov_objs        Covariance matrixes for multivariate Gaussian sources to scale. Dimensions: [Nobj,2,2]
    optimizer       The optimizer to use when scaling the sources
    verbose         Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_model_cube as tmc
    scale, cov = tmc.optimize_img_scale()

    """
    if verbose: print(' - Optimize residual between model (multiple Gaussians) and data with least squares in 2D')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if verbose: print('   ----------- Started on '+tu.get_now_string()+' ----------- ')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if optimizer == 'leastsq':
        sys.exit('optimizer = "leastsq" not enabled')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    elif optimizer == 'curve_fit':
        scales_initial_guess   = np.ones(mu_objs.shape[0])
        imgsize                = img_data.shape
        xgrid, ygrid           = tu.gen_gridcomponents(imgsize)
        with warnings.catch_warnings():
            #warnings.simplefilter("ignore")
            # Tuple-parameter lambdas are Python 2 only; take the grid as one argument.
            scale_best, scale_cov = opt.curve_fit(
                lambda xy, *scales: curve_fit_fct_wrapper_sourcefit(xy, mu_objs, cov_objs, *scales),
                (xgrid, ygrid), img_data.ravel(), p0=scales_initial_guess, sigma=img_std.ravel())
        output = scale_best, scale_cov
    else:
        sys.exit(' ---> Invalid optimizer ('+optimizer+') chosen for optimize_source_scale_gauss()')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if verbose: print('   ----------- Finished on '+tu.get_now_string()+' ----------- ')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    return output
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: TDOSE    Author: kasperschmidt
def optimize_img_scale(img_data,img_std,img_model,optimizer='curve_fit',show_residualimg=False,verbose=True):
    """
    optimize the (flux) scaling of an image with respect to a (noisy) data image

    --- INPUT ---
    img_data            The (noisy) data image to scale model image provide in img_model to
    img_std             Standard deviation image for data to use in optimization
    img_model           Model image to (flux) scale to match img_data
    optimizer           The optimizer to use when scaling the layers
    show_residualimg    To show the residual image (data - model) for the optimize layers, set this to true
    verbose             Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_model_cube as tmc
    scale, cov = tmc.optimize_img_scale()

    """
    if verbose: print(' - Optimize residual between model image and data with least squares in 2D')
    if verbose: print('   ----------- Started on '+tu.get_now_string()+' ----------- ')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if optimizer == 'leastsq':
        sys.exit('optimizer = "leastsq" not enabled')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    elif optimizer == 'curve_fit':
        imgsize                = img_data.shape
        xgrid, ygrid           = tu.gen_gridcomponents(imgsize)
        # Tuple-parameter lambdas are Python 2 only; take the grid as one argument.
        scale_best, scale_cov = opt.curve_fit(
            lambda xy, scale: tmc.curve_fit_fct_wrapper_imgscale(xy, scale, img_model),
            (xgrid, ygrid), img_data.ravel(), p0=[1.0], sigma=img_std.ravel())

        output = scale_best, scale_cov
    else:
        sys.exit(' ---> Invalid optimizer ('+optimizer+') chosen in optimize_img_scale()')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if verbose: print('   ----------- Finished on '+tu.get_now_string()+' ----------- ')
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if show_residualimg:
        if verbose: print(' - Displaying the residual image between data and scaled model image ')
        res_img  = img_model-img_data
        plt.imshow(res_img,interpolation='none', vmin=1e-5, vmax=np.max(res_img), norm=mpl.colors.LogNorm())
        plt.title('Initial Residual = Initial Model Image - Data Image')
        plt.show()

        res_img  = img_model*scale_best-img_data
        plt.imshow(res_img,interpolation='none', vmin=1e-5, vmax=np.max(res_img), norm=mpl.colors.LogNorm())
        plt.title('Best Residual = Scaled (by '+str(scale_best)+') Model Image - Data Image')
        plt.show()
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    return output
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
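
Both TDOSE wrappers reduce to the standard scipy.optimize.curve_fit pattern: a model function of (xdata, *params) fitted to flattened image data with per-pixel uncertainties. A minimal sketch with synthetic data and a single flux-scale parameter (all names here are assumptions):

import numpy as np
import scipy.optimize as opt

img_model = np.random.rand(10, 10)
img_data = 2.0 * img_model + 0.01 * np.random.normal(size=img_model.shape)
img_std = np.full(img_model.shape, 0.01)
xdummy = np.arange(img_model.size)  # pixel-grid placeholder; the model fixes the pixels

def model_fct(xdata, scale):
    return (scale * img_model).ravel()

scale_best, scale_cov = opt.curve_fit(model_fct, xdummy, img_data.ravel(),
                                      p0=[1.0], sigma=img_std.ravel())
print(scale_best)  # close to [2.0]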
Project: third_person_im    Author: bstadie
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        with tf.variable_scope(self._name):
            penalty_var = tf.placeholder(tf.float32, tuple(), name="penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            params = target.get_params(trainable=True)
            grads = tf.gradients(penalized_loss, params)
            for idx, (grad, param) in enumerate(zip(grads, params)):
                if grad is None:
                    grads[idx] = tf.zeros_like(param)
            flat_grad = tensor_utils.flatten_tensor_variables(grads)
            return [
                tf.cast(penalized_loss, tf.float64),
                tf.cast(flat_grad, tf.float64),
            ]

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: tensor_utils.compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
            )
        )
Project: model_sweeper    Author: akimovmike
import multiprocessing

def train_models(X, y, n_folds):
    # Use all but one of the machine's cores.
    system_cores = multiprocessing.cpu_count() - 1

    model_zoo = {
        'logreg': {
            'model_class': 'logreg',  # todo: correct class name
            'can_parallel': True,
            'param_dict': {'C': [.01, 10]},
        },
        'rf': {
            'model_class': 'rf',  # todo: correct class name
            'can_parallel': True,
            'param_dict': {'max_tree_depth': [5, 100]},
            # todo: other hyperparams
        },
        # todo: other models
    }

    trained_models = []

    # call parallel n_jobs = system_cores/n_folds
    #    for model in model_zoo:
    #        model_instance = model_instance(model_zoo_parallel[model])
    #        model_instance.train(X, y)
    #        trained_models.append(model_instance)

    return trained_models

def predict(trained_models, X):
    preds = {}    
    for model in trained_models:
        preds[model.name] = model.predict(X)
    return preds

def predict_proba(trained_models, X):
    preds = {}    
    for model in trained_models:
        preds[model.name] = model.predict_proba(X)
    return preds

def blend_preds(preds):
    # preds_df = convert preds to DF
    # check dispersion of preds
    # drop similar preds
    # preds_blend = preds_df.gmean(axis=1)
    raise NotImplementedError  # todo: implement the blending outlined above

def blend_preds_weighted(preds, X, y):
    # check dispersion of preds
    # drop similar preds
    # train blender model/scipy.optimize(X,y)
    # weighted_avg = predict by blender model from filtered preds
    raise NotImplementedError  # todo: implement the weighted blending outlined above
Project: rllabplusplus    Author: shaneshixiang
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        with tf.variable_scope(self._name):
            penalty_var = tf.placeholder(tf.float32, tuple(), name="penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            params = target.get_params(trainable=True)
            grads = tf.gradients(penalized_loss, params)
            for idx, (grad, param) in enumerate(zip(grads, params)):
                if grad is None:
                    grads[idx] = tf.zeros_like(param)
            flat_grad = tensor_utils.flatten_tensor_variables(grads)
            return [
                tf.cast(penalized_loss, tf.float64),
                tf.cast(flat_grad, tf.float64),
            ]

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: tensor_utils.compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
            )
        )
Project: fitdadi    Author: LohmuellerLab
def optimize_cons(p0, data, model_func, sel_dist, theta, lower_bound=None,
                  upper_bound=None, verbose=0, flush_delay=0.5, epsilon=1e-4,
                  constraint=None, gtol=1e-6, multinom=False, maxiter=None,
                  full_output=False, func_args=[], func_kwargs={},
                  fixed_params=None, ll_scale=1, output_file=None):
    """
    Constrained optimization needs a constraint function and bounds. 
    """

    if output_file:
        output_stream = open(output_file, 'w')
    else:
        output_stream = sys.stdout

    if not (lower_bound is None):
        lower_bound_a = lower_bound + [0]
    if not (upper_bound is None):
        upper_bound_a = upper_bound + [numpy.inf]

    args = (data, model_func, sel_dist, theta, lower_bound, upper_bound,
            verbose, multinom, flush_delay, func_args, func_kwargs,
            fixed_params, ll_scale, output_stream)

    p0 = Inference._project_params_down(p0, fixed_params)

    #### make sure to define consfunc and bnds ####
    if (lower_bound is not None) and (upper_bound is not None):
        bnds = tuple((x, y) for x, y in zip(lower_bound, upper_bound))
    else:
        bnds = ()  # fmin_slsqp's default: no bounds
    outputs = scipy.optimize.fmin_slsqp(_object_func,
                                        p0, bounds=bnds, args=args,
                                        f_eqcons=constraint, epsilon=epsilon,
                                        iter=maxiter, full_output=True,
                                        disp=False)
    # fmin_slsqp's full_output is actually (x, fx, its, imode, smode)
    xopt, fopt, func_calls, grad_calls, warnflag = outputs
    xopt = Inference._project_params_up(xopt, fixed_params)

    if output_file:
        output_stream.close()

    if not full_output:
        return [-fopt, xopt]
    else:
        return xopt, fopt, func_calls, grad_calls, warnflag
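For reference, a self-contained fmin_slsqp call in the same full-output form the function above unpacks; the toy objective and equality constraint are illustrative:

import numpy as np
import scipy.optimize

# minimize x^2 + y^2 subject to x + y = 1
def objective(p):
    return p[0] ** 2 + p[1] ** 2

def eq_constraint(p):
    return np.array([p[0] + p[1] - 1.0])  # must equal zero at a feasible point

outputs = scipy.optimize.fmin_slsqp(objective, [2.0, 0.0],
                                    f_eqcons=eq_constraint,
                                    bounds=[(-5.0, 5.0), (-5.0, 5.0)],
                                    full_output=True, disp=False)
xopt, fopt, its, imode, smode = outputs   # xopt is approximately [0.5, 0.5]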
Project: gail-driver    Author: sisl    | Project source | File source
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        with tf.variable_scope(self._name):
            penalty_var = tf.placeholder(tf.float32, tuple(), name="penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            params = target.get_params(trainable=True)
            grads = tf.gradients(penalized_loss, params)
            for idx, (grad, param) in enumerate(zip(grads, params)):
                if grad is None:
                    grads[idx] = tf.zeros_like(param)
            flat_grad = tensor_utils.flatten_tensor_variables(grads)
            return [
                tf.cast(penalized_loss, tf.float64),
                tf.cast(flat_grad, tf.float64),
            ]

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(
                inputs, loss, log_name="f_loss"),
            f_constraint=lambda: tensor_utils.compile_function(
                inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
            )
        )
Project: rllab    Author: rll    | Project source | File source
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        with tf.variable_scope(self._name):
            penalty_var = tf.placeholder(tf.float32, tuple(), name="penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            params = target.get_params(trainable=True)
            grads = tf.gradients(penalized_loss, params)
            for idx, (grad, param) in enumerate(zip(grads, params)):
                if grad is None:
                    grads[idx] = tf.zeros_like(param)
            flat_grad = tensor_utils.flatten_tensor_variables(grads)
            return [
                tf.cast(penalized_loss, tf.float64),
                tf.cast(flat_grad, tf.float64),
            ]

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: tensor_utils.compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
            )
        )
Project: Panacea    Author: grzeimann    | Project source | File source
def fit_scale_wave0(init_scale, init_wave0, xi, xe, D, sun_wave, ysun, data, 
                    fixscale=False, dofit=True, buff=10,
                    plot=False, res=1.9): 
    '''
    Fitting function for a linear wavelength solution of a small column
    section of one fiber.
    '''
    if fixscale:
        def f(params, sel1):
            wv = init_scale * np.arange(D) + params[0]
            model = np.interp(wv[xi:xe+1], sun_wave, ysun, left=0.0, right=0.0)
            return model[sel1] - data[sel1]
        if dofit:
            params0 = np.array([init_wave0])
            sel = is_outlier(f(params0, np.ones(data.shape, dtype=bool))) < 1
            sol = scipy.optimize.leastsq(f, params0, args=(sel,))[0]  # args must be a tuple
            chi2 = f(sol, sel + True)**2  # sel+True is all-True: residuals on every point
        else: 
            params0 = np.arange(init_wave0-buff, init_wave0+buff, res/10.)
            sel = np.ones(data.shape,dtype=bool)
            chi2_manual = np.zeros(params0.shape)
            for i,p in enumerate(params0):
                chi2_manual[i] = (f([p], sel)**2).sum() 
            if plot:
                plt.figure()
                plt.plot(params0,chi2_manual)
                plt.yscale('log')
                plt.show()
                input("Please press enter")  # was raw_input() under Python 2
                plt.close()
            sol = [params0[chi2_manual.argmin()]]
            chi2 = f(sol, sel)**2
    else:
        def f(params, sel1):
            wv = params[0] * np.arange(D) + params[1]
            model = np.interp(wv[xi:xe+1], sun_wave, ysun, left=0.0, right=0.0)
            return model[sel1] - data[sel1]

        params0 = np.array([init_scale, init_wave0])
        sel = is_outlier(f(params0, np.ones(data.shape, dtype=bool))) < 1
        if np.sum(sel) > 3:
            sol = scipy.optimize.leastsq(f, params0, args=(sel,))[0]  # args must be a tuple
            chi2 = f(sol, sel + True)**2
        else:
            sol = params0
            chi2 = 1e6*np.ones(data.shape)
    return sol, chi2
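scipy.optimize.leastsq expects the residual function's extra arguments as a tuple, which is what the (sel,) fixes above restore. A standalone example of the same call pattern on synthetic data (all names here are illustrative):

import numpy as np
import scipy.optimize

# synthetic observations from a linear model y = a*x + b plus noise
rng = np.random.default_rng(0)
x = np.arange(50, dtype=float)
y = 1.9 * x + 3500.0 + rng.normal(scale=0.5, size=x.size)

def residuals(params, mask):
    a, b = params
    return (a * x + b - y)[mask]       # residuals on the selected points only

mask = np.ones(x.size, dtype=bool)     # keep every point
sol = scipy.optimize.leastsq(residuals, [2.0, 3400.0], args=(mask,))[0]
# sol is approximately [1.9, 3500.0]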
Project: maml_rl    Author: cbfinn    | Project source | File source
def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.parameterized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        with tf.variable_scope(self._name):
            penalty_var = tf.placeholder(tf.float32, tuple(), name="penalty")
        penalized_loss = loss + penalty_var * constraint_term

        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            params = target.get_params(trainable=True)
            grads = tf.gradients(penalized_loss, params)
            for idx, (grad, param) in enumerate(zip(grads, params)):
                if grad is None:
                    grads[idx] = tf.zeros_like(param)
            flat_grad = tensor_utils.flatten_tensor_variables(grads)
            return [
                tf.cast(penalized_loss, tf.float64),
                tf.cast(flat_grad, tf.float64),
            ]

        self._opt_fun = ext.lazydict(
            f_loss=lambda: tensor_utils.compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: tensor_utils.compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: tensor_utils.compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
            )
        )
Project: treetime    Author: neherlab    | Project source | File source
def optimal_t_compressed(self, seq_pair, multiplicity):
        """
        Find the optimal distance between the two sequences
        """

        def _neg_prob(t, seq_pair, multiplicity):
            """
            Probability to observe child given the the parent state, transition
            matrix and the time of evolution (branch length).

            Parameters
            ----------

             t : double
                Branch length (time between sequences)

             parent :  numpy.array
                Parent sequence

             child : numpy.array
                Child sequence

             tm :  GTR
                Model of evolution

            Returns
            -------

             prob : double
                Negative probability of the two given sequences
                to be separated by the time t.
            """
            return -1.0*self.prob_t_compressed(seq_pair, multiplicity,t, return_log=True)

        try:
            from scipy.optimize import minimize_scalar
            opt = minimize_scalar(_neg_prob,
                    bounds=[0,ttconf.MAX_BRANCH_LENGTH],
                    method='bounded',
                    args=(seq_pair, multiplicity), options={'xatol':1e-8})
            new_len = opt["x"]
        except ImportError:  # legacy scipy without minimize_scalar
            import scipy
            print('legacy scipy', scipy.__version__)
            from scipy.optimize import fminbound
            new_len = fminbound(_neg_prob,
                    0, ttconf.MAX_BRANCH_LENGTH,
                    args=(seq_pair, multiplicity))
            opt = {'success': True}

        if new_len > .9 * ttconf.MAX_BRANCH_LENGTH:
            self.logger("WARNING: GTR.optimal_t_compressed -- The branch length seems to be very long!", 4, warn=True)

        if opt["success"] != True:
            # return hamming distance: number of state pairs where state differs/all pairs
            new_len =  np.sum(multiplicity[seq_pair[:,1]!=seq_pair[:,0]])/np.sum(multiplicity)

        return new_len
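The bounded one-dimensional search above is the generic scipy pattern for branch-length style problems. A self-contained minimize_scalar example with the same method and options (the stand-in objective is illustrative):

from scipy.optimize import minimize_scalar

def neg_log_likelihood(t):
    return (t - 0.35) ** 2             # stand-in for -log P(data | t)

opt = minimize_scalar(neg_log_likelihood,
                      bounds=[0, 10.0], method='bounded',
                      options={'xatol': 1e-8})
if opt.success:
    best_t = opt.x                     # approximately 0.35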