Python scipy.optimize module: OptimizeResult() example source code

The following 24 code examples, extracted from open-source Python projects, illustrate how to use scipy.optimize.OptimizeResult().
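
As background: `OptimizeResult` is a subclass of `dict` whose entries are also accessible as attributes, and only the fields you set are present. A minimal sketch (the field names follow scipy's usual conventions):

from scipy.optimize import OptimizeResult

res = OptimizeResult(x=[0.5], fun=0.25, nit=3, nfev=7, success=True,
                     message="Converged.")
print(res.x)        # attribute access -> [0.5]
print(res["fun"])   # dict-style access -> 0.25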

Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def load(filename, **kwargs):
    """
    Reconstruct a skopt optimization result from a file
    persisted with skopt.dump.

    Notice that the loaded optimization result can be missing
    the objective function (`.specs['args']['func']`) if `skopt.dump`
    was called with `store_objective=False`.

    Parameters
    ----------
    * `filename` [string or `pathlib.Path`]:
        The path of the file from which to load the optimization result.

    * `**kwargs` [other keyword arguments]:
        All other keyword arguments will be passed to `joblib.load`.

    Returns
    -------
    * `res` [`OptimizeResult`, scipy object]:
        Reconstructed OptimizeResult instance.
    """
    return load_(filename, **kwargs)
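
For context, a full dump/load round trip might look like this sketch (the toy objective, dimensions, and file name are illustrative; `gp_minimize`, `dump`, and `load` are assumed importable from the skopt top level):

from skopt import gp_minimize, dump, load

result = gp_minimize(lambda x: (x[0] - 0.3) ** 2, [(-1.0, 1.0)], n_calls=15)
dump(result, "result.gz", store_objective=False)  # objective dropped for portability
restored = load("result.gz")
print(restored.x, restored.fun)  # .specs['args']['func'] is missing here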
Project: cartesian    Author: Ohjeah    | Project source | File source
def optimize(fun, individual):
    """
    Prepare individual and fun to optimize fun(c | individual)

    :param fun: callable of lambda expression and its constant values.
    :param individual: the individual whose constants are optimized.
    :return: scipy.optimize.OptimizeResult
    """
    f = compile(individual)
    def h(consts=()):
        return fun(f, consts)

    expr, args = to_polish(individual, return_args=True)
    constants = [a for a in args if isinstance(a, Constant)]
    if constants:
        res = minimize(h, np.ones_like(constants))
        individual.consts = res.x
        return res
    else:
        return OptimizeResult(x=(), fun=h(), nfev=1, nit=0, success=True)
Project: GPflowOpt    Author: GPflow    | Project source | File source
def optimize(self, objectivefx, **kwargs):
        """
        Optimize a given function f over a domain.

        The optimizer class supports interruption: if Ctrl+C is pressed during the optimization, the last best
        point is returned.

        The actual optimization routine is implemented in _optimize, to be implemented in subclasses.

        :param objectivefx: callable, taking one argument: a 2D numpy array. The number of columns corresponds to
            the dimensionality of the input domain.
        :return: OptimizeResult reporting the results.
        """
        objective = ObjectiveWrapper(objectivefx, **self._wrapper_args)
        try:
            result = self._optimize(objective, **kwargs)
        except KeyboardInterrupt:
            result = OptimizeResult(x=objective._previous_x,
                                    success=False,
                                    message="Caught KeyboardInterrupt, returning last good value.")
        result.x = np.atleast_2d(result.x)
        result.nfev = objective.counter
        return result
Project: GPflowOpt    Author: GPflow    | Project source | File source
def optimize(self, objectivefx):
        """
        The StagedOptimizer overrides the default behaviour of optimize(). It passes the best point of the previous
        stage to the next stage. If the optimization is interrupted or fails, this process stops and the
        OptimizeResult is returned.
        """

        results = []
        for current, following in zip(self.optimizers[:-1], self.optimizers[1:]):
            result = current.optimize(objectivefx)
            results.append(result)
            if not result.success:
                result.message += " StagedOptimizer interrupted after {0}.".format(current.__class__.__name__)
                break
            following.set_initial(self._best_x(results)[0])

        if result.success:
            result = self.optimizers[-1].optimize(objectivefx)
            results.append(result)

        result.nfev = sum(r.nfev for r in results)
        result.nstages = len(results)
        if any(r.success for r in results):
            result.x, result.fun = self._best_x(results)
        return result
Project: GPflowOpt    Author: GPflow    | Project source | File source
def optimize(self, objectivefx, n_iter=20):
        """
        Run Bayesian optimization for a number of iterations.

        Before the loop is initiated, first all points retrieved by :meth:`~.optim.Optimizer.get_initial` are evaluated
        on the objective and black-box constraints. These points are then added to the acquisition function 
        by calling :meth:`~.acquisition.Acquisition.set_data` (and hence, the underlying models). 

        In each iteration, a new data point is selected for evaluation by optimizing an acquisition function. The
        models are then updated with the new observation.

        :param objectivefx: (list of) expensive black-box objective and constraint functions. For evaluation, the
            responses of all the expensive functions are aggregated column-wise.
            Unlike the typical :class:`~.optim.Optimizer` interface, these functions should not return gradients.
        :param n_iter: number of iterations to run
        :return: OptimizeResult object
        """
        fxs = np.atleast_1d(objectivefx)
        return super(BayesianOptimizer, self).optimize(lambda x: self._evaluate_objectives(x, fxs), n_iter=n_iter)
Project: GPflowOpt    Author: GPflow    | Project source | File source
def test_silent(self):
        class EmittingOptimizer(gpflowopt.optim.Optimizer):
            def __init__(self):
                super(EmittingOptimizer, self).__init__(gpflowopt.domain.ContinuousParameter('x0', 0, 1))

            def _optimize(self, objective):
                print('hello world!')
                return OptimizeResult(x=np.array([0.5]))

        # First, optimize with silent mode off. Should return the stdout of the optimizer
        opt = EmittingOptimizer()
        with self.captured_output() as (out, err):
            opt.optimize(None)
            output = out.getvalue().strip()
            self.assertEqual(output, 'hello world!')

        # Now with silent mode on
        with self.captured_output() as (out, err):
            with opt.silent():
                opt.optimize(None)
                output = out.getvalue().strip()
                self.assertEqual(output, '')
Project: sdaopt    Author: sgubianpm    | Project source | File source
def result(self):
        """ The OptimizeResult """
        res = OptimizeResult()
        res.x = self.es.xbest
        res.fun = self.es.ebest
        res.nit = self._iter
        res.ncall = self.owf.nb_fun_call
        return res
Project: pybroom    Author: tritemio    | Project source | File source
def tidy_scipy_result(result, param_names, **kwargs):
    """Tidy parameters data from scipy's `OptimizeResult`.

    Normally this function is not called directly but invoked by the
    general purpose function :func:`tidy`.
    Since `OptimizeResult` has a raw array of fitted parameters
    but no names, the parameters' names need to be passed in `param_names`.

    Arguments:
        result (`OptimizeResult`): the fit result object.
        param_names (string or list of string): names of the fitted parameters.
            It can either be a list of strings or a single string with
            space-separated names.

    Returns:
        A DataFrame in tidy format with one row for each parameter.

    Note:
        These two columns are always present in the returned DataFrame:

        - `name` (string): name of the parameter.
        - `value` (number): value of the parameter after the optimization.

        Optional columns (depending on the type of result) are:

        - `grad` (float): gradient for each parameter
        - `active_mask` (int)
    """
    Params = namedtuple('Params', param_names)
    params = Params(*result.x)
    df = dict_to_tidy(params._asdict(), **kwargs)
    for var in ('grad', 'active_mask'):
        if hasattr(result, var):
            df[var] = result[var]
    return df
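
A usage sketch, assuming pybroom is installed and imported as `br` (the two-parameter quadratic below is purely illustrative):

import pybroom as br
from scipy.optimize import minimize

result = minimize(lambda p: (p[0] - 1) ** 2 + (p[1] + 2) ** 2, x0=[0.0, 0.0])
df = br.tidy(result, param_names='a b')  # names are required for OptimizeResult
print(df[['name', 'value']])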
Project: pybroom    Author: tritemio    | Project source | File source
def glance_scipy_result(result):
    """Tidy summary statistics from scipy's `OptimizeResult`.

    Normally this function is not called directly but invoked by the
    general purpose function :func:`glance`.

    Arguments:
        result (`OptimizeResult`): the fit result object.

    Returns:
        A DataFrame in tidy format with one row and several summary statistics
        as columns.

    Note:
        Possible columns of the returned DataFrame include:

        - `success` (bool): whether the fit succeeded
        - `cost` (float): value of the cost function
        - `optimality` (float): optimality parameter as returned by
          scipy.optimize.least_squares.
        - `nfev` (int): number of objective function evaluations
        - `njev` (int): number of Jacobian function evaluations
        - `nit` (int): number of iterations
        - `status` (int): status returned by the fit routine
        - `message` (string): message returned by the fit routine
    """
    attr_names_all = ['success', 'cost', 'optimality', 'nfev', 'njev', 'nit',
                      'status', 'message']
    attr_names = [a for a in attr_names_all if hasattr(result, a)]
    if hasattr(result, 'fun') and np.size(result.fun) == 1:
        attr_names.append('fun')
    d = pd.DataFrame(index=range(1), columns=attr_names)
    for attr_name in attr_names:
        d.loc[0, attr_name] = getattr(result, attr_name)
    return d.apply(pd.to_numeric, errors='ignore')
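
A matching sketch for the one-row summary (same assumptions as the `tidy` example above):

import pybroom as br
from scipy.optimize import minimize

result = minimize(lambda p: (p[0] - 1) ** 2, x0=[0.0])
dg = br.glance(result)   # one row of summary statistics
print(dg[['success', 'nfev', 'fun']])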
Project: py-investment    Author: kprestel    | Project source | File source
    def _solve_weights(self, returns: np.ndarray,
                       covar: np.matrix) -> np.ndarray:
        """
        Solve for the optimal weights.

        :param returns: numpy array of the average historical returns.
        :param covar: matrix of covariances.
        :return: optimal weights.
        """

        def fitness(weights, returns, covar, rf):
            mean, var = _mean_var(weights, returns, covar)
            sharpe = (mean - rf) / np.sqrt(var)
            return 1 / sharpe

        assets = len(returns)
        base_weights = np.ones([assets]) / assets
        # each weight is bounded between 0 and 1 (long-only, no shorting)
        b_ = [(0, 1) for _ in range(assets)]
        c_ = ({'type': 'eq', 'fun': lambda weights: sum(weights) - 1.0})
        optimized = minimize(fitness,
                             base_weights,
                             (returns, covar, self.rf),
                             method='SLSQP',
                             constraints=c_,
                             bounds=b_)
        if not optimized.success:
            raise ValueError(optimized.message)
        return optimized.x
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def check_result_callable(res):
    """
    Check that the result instance is set right at every callable call.
    """
    assert isinstance(res, OptimizeResult)
    assert_equal(len(res.x_iters), len(res.func_vals))
    assert_equal(np.min(res.func_vals), res.fun)
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def test_returns_result_object():
    base_estimator = ExtraTreesRegressor(random_state=2)
    opt = Optimizer([(-2.0, 2.0)], base_estimator, n_initial_points=1,
                    acq_optimizer="sampling")
    result = opt.tell([1.5], 2.)

    assert isinstance(result, OptimizeResult)
    assert_equal(len(result.x_iters), len(result.func_vals))
    assert_equal(np.min(result.func_vals), result.fun)
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def eval_callbacks(callbacks, result):
    """Evaluate list of callbacks on result.

    The return values of the `callbacks` are ORed together to give the
    overall decision on whether or not the optimization procedure should
    continue.

    Parameters
    ----------
    * `callbacks` [list of callables]:
        Callbacks to evaluate.

    * `result` [`OptimizeResult`, scipy object]:
        Optimization result object to be stored.

    Returns
    -------
    * `decision` [bool]:
        Decision of the callbacks whether or not to keep optimizing
    """
    stop = False
    if callbacks:
        for c in callbacks:
            decision = c(result)
            if decision is not None:
                stop = stop or decision

    return stop
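
Under these semantics, any truthy return value stops the run, so a custom early-stopping callback can be as small as this sketch (the threshold is arbitrary):

def stop_below_threshold(result):
    """Stop once the best observed value drops below a fixed threshold."""
    return result.fun < 1e-3

# e.g. gp_minimize(objective, dimensions, callback=[stop_below_threshold])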
Project: cartesian    Author: Ohjeah    | Project source | File source
def return_opt_result(f, individual):
    """
    Ensure that f returns a scipy.optimize.OptimizeResult

    :param f: `callable(individual)`
    :param individual: instance of cartesian.cgp.Cartesian
    :return: OptimizeResult
    """
    res = f(individual)
    if not isinstance(res, OptimizeResult):
        res = OptimizeResult(x=(), fun=res, nit=0, nfev=1, success=True)
    return res
Project: GPflowOpt    Author: GPflow    | Project source | File source
def _optimize(self, objective):
        points = self._get_eval_points()
        evaluations = objective(points)
        idx_best = np.argmin(evaluations, axis=0)

        return OptimizeResult(x=points[idx_best, :],
                              success=True,
                              fun=evaluations[idx_best, :],
                              nfev=points.shape[0],
                              message="OK")
Project: GPflowOpt    Author: GPflow    | Project source | File source
def _create_bo_result(self, success, message):
        """
        Analyzes all data evaluated during the optimization and returns an OptimizeResult. Outputs of constraints
        are used to remove all infeasible points.

        :param success: Optimization successful? (True/False)
        :param message: return message
        :return: OptimizeResult object
        """
        X, Y = self.acquisition.data

        # Filter on constraints
        valid = self.acquisition.feasible_data_index()

        if not np.any(valid):
            return OptimizeResult(success=False,
                                  message="No evaluations satisfied the constraints")

        valid_X = X[valid, :]
        valid_Y = Y[valid, :]
        valid_Yo = valid_Y[:, self.acquisition.objective_indices()]

        # Differentiate between single- and multiobjective optimization results
        if valid_Y.shape[1] > 1:
            _, dom = non_dominated_sort(valid_Yo)
            idx = dom == 0  # Return the non-dominated points
        else:
            idx = np.argmin(valid_Yo)

        return OptimizeResult(x=valid_X[idx, :],
                              success=success,
                              fun=valid_Yo[idx, :],
                              message=message)
Project: pybroom    Author: tritemio    | Project source | File source
def tidy(result, var_names='key', **kwargs):
    """Tidy DataFrame containing fitted parameter data from `result`.

    A function to tidy any of the supported fit result objects
    (or a list of fit results). This function identifies the input type
    and calls the relevant "specialized" tidying function. When the input
    is a list, the returned DataFrame contains data from all the fit
    results.
    Supported fit result objects are `lmfit.model.ModelResult`,
    `lmfit.minimizer.MinimizerResult` and `scipy.optimize.OptimizeResult`.

    Arguments:
        result (fit result object or list): one of the supported fit result
            objects or a list of supported fit result objects. When a list,
            all the elements need to be of the same type.
        var_names (string or list): name(s) of the column(s) containing
            an "index" that is different for each element in the set of
            fit results.
        param_names (string or list of string): names of the fitted parameters
            for fit results which don't include parameter's names
            (such as scipy's OptimizeResult). It can either be a list of
            strings or a single string with space-separated names.
        **kwargs: additional arguments passed to the underlying specialized
            tidying function.

    Returns:
        A DataFrame with one row for each fitted parameter.
        Columns include parameter properties such as best-fit value,
        standard error, and bounds/constraints, if any.
        When a list of fit-result objects is passed, the column `var_name`
        (`'item'` by default) contains the index of the object
        in the list.

    See also:
        For more details on the returned DataFrame and on additional
        arguments refer to the specialized tidying functions:
        :func:`tidy_lmfit_result` and :func:`tidy_scipy_result`.
    """
    # Find out what result is and call the relevant function
    if isinstance(result, so.OptimizeResult):
        if 'param_names' not in kwargs:
            msg = "The argument `param_names` is required for this input type."
            raise ValueError(msg)
        return tidy_scipy_result(result, **kwargs)
    elif (isinstance(result, lmfit.model.ModelResult) or
          isinstance(result, lmfit.minimizer.MinimizerResult)):
        return tidy_lmfit_result(result)
    elif isinstance(result, list) or isinstance(result, dict):
        return _multi_dataframe(tidy, result, var_names, **kwargs)
    else:
        msg = 'Sorry, `tidy` does not support this object type (%s)'
        raise NotImplementedError(msg % type(result))
Project: pybroom    Author: tritemio    | Project source | File source
def glance(results, var_names='key', **kwargs):
    """Tidy DataFrame containing fit summaries from`result`.

    A function to tidy any of the supported fit result objects
    (or a list of fit results). This function identifies the input type
    and calls the relevant "specialized" tidying function. When the input
    is a list, the returned DataFrame contains data from all the fit
    results.
    Supported fit result objects are `lmfit.model.ModelResult`,
    `lmfit.minimizer.MinimizerResult` and `scipy.optimize.OptimizeResult`.

    Arguments:
        results (fit result object or list): one of the supported fit result
            objects or a list of supported fit result objects. When a list,
            all the elements need to be of the same type.
        var_names (string or list): name(s) of the column(s) containing
            an "index" that is different for each element in the set of
            fit results.
        **kwargs: additional arguments passed to the underlying specialized
            tidying function.

    Returns:
        A DataFrame with one row for each passed fit result.
        Columns include fit summaries such as reduced chi-square,
        number of evaluations, successful convergence, AIC, BIC, etc.
        When a list of fit-result objects is passed, the column `var_name`
        (`'item'` by default) contains the index of the object
        in the list.

    See also:
        For more details on the returned DataFrame and on additional
        arguments refer to the specialized tidying functions:
        :func:`glance_lmfit_result` and :func:`glance_scipy_result`.
    """
    if isinstance(results, so.OptimizeResult):
        return glance_scipy_result(results, **kwargs)
    elif (isinstance(results, lmfit.model.ModelResult) or
          isinstance(results, lmfit.minimizer.MinimizerResult)):
        return glance_lmfit_result(results)
    elif isinstance(results, list) or isinstance(results, dict):
        return _multi_dataframe(glance, results, var_names, **kwargs)
    else:
        msg = 'Sorry, `glance` does not support this object type (%s)'
        raise NotImplementedError(msg % type(results))
Project: pybroom    Author: tritemio    | Project source | File source
def _multi_dataframe(func, results, var_names, **kwargs):
    """Recursively call `func` on each item in `results` and concatenate output.

    Usually `func` is :func:`glance`, :func:`tidy` or :func:`augment`.
    The function `func` is also the calling function, therefore this implements
    a recursion which unpacks the nested `results` structure (a tree) and
    builds a global tidy DataFrame with "key" columns corresponding to
    the `results` structure.

    Arguments:
        func (function): function called on each element of `results`.
            Choose between `glance`, `tidy` or `augment`.
        results (dict or list): collection of fit results. It can be a list,
            a dict or a nested structure such as a dict of lists.
        var_names (list or string): names of DataFrame columns used to index
            the results. It can be a list of strings or a single string in case
            only one categorical "index" is needed (i.e. a string is equivalent
            to a 1-element list of strings).

    Returns:
        "Tidy" DataFrame merging data from all the items in `results`.
        Necessary "key" columns are added to encode layout of fitting result
        objects in `results`.
    """
    if isinstance(results, so.OptimizeResult):
        raise ValueError('Input argument has wrong type: `OptimizeResult`.')
    if len(var_names) == 0:
        msg = ('The list `var_names` is too short. Its length should be equal '
               'to the nesting levels in `results`.')
        raise ValueError(msg)
    d = _as_odict_copy(results)
    var_names = _as_list_of_strings_copy(var_names)
    var_name = var_names.pop(0)
    for i, (key, res) in enumerate(d.items()):
        d[key] = func(res, var_names, **kwargs)
        d[key][var_name] = key
    df = pd.concat(d, ignore_index=True)
    # Convert "key" column to categorical only if input was dict-type
    # not list/tuple.
    if isinstance(results, dict):
        kw = {var_name: lambda x: pd.Categorical(x[var_name], ordered=True)}
        df = df.assign(**kw)
    return df
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def plot_evaluations(result, bins=20, dimensions=None):
    """Visualize the order in which points where sampled.

    The scatter plot matrix shows at which points in the search
    space and in which order samples were evaluated. Pairwise
    scatter plots are shown on the off-diagonal for each
    dimension of the search space. The order in which samples
    were evaluated is encoded in each point's color.
    The diagonal shows a histogram of sampled values for each
    dimension. A red point indicates the found minimum.

    Note: search spaces that contain `Categorical` dimensions are
          currently not supported by this function.

    Parameters
    ----------
    * `result` [`OptimizeResult`]
        The result for which to create the scatter plot matrix.

    * `bins` [int, bins=20]:
        Number of bins to use for histograms on the diagonal.

    * `dimensions` [list of str, default=None]:
        Labels of the dimension variables. `None` defaults to
        `space.dimensions[i].name`, or if also `None` to `['X_0', 'X_1', ..]`.

    Returns
    -------
    * `ax`: [`Axes`]:
        The matplotlib axes.
    """
    space = result.space
    samples = np.asarray(result.x_iters)
    order = range(samples.shape[0])
    fig, ax = plt.subplots(space.n_dims, space.n_dims,
                           figsize=(2 * space.n_dims, 2 * space.n_dims))

    fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95,
                        hspace=0.1, wspace=0.1)

    for i in range(space.n_dims):
        for j in range(space.n_dims):
            if i == j:
                if space.dimensions[j].prior == 'log-uniform':
                    low, high = space.bounds[j]
                    bins_ = np.logspace(np.log10(low), np.log10(high), bins)
                else:
                    bins_ = bins
                ax[i, i].hist(samples[:, j], bins=bins_,
                              range=space.dimensions[j].bounds)

            # lower triangle
            elif i > j:
                ax[i, j].scatter(samples[:, j], samples[:, i], c=order,
                                 s=40, lw=0., cmap='viridis')
                ax[i, j].scatter(result.x[j], result.x[i],
                                 c=['r'], s=20, lw=0.)

    return _format_scatter_plot_axes(ax, space, ylabel="Number of samples",
                                     dim_labels=dimensions)
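
Typical usage, assuming a result from one of skopt's sequential minimizers (`plot_evaluations` lives in `skopt.plots`):

import matplotlib.pyplot as plt
from skopt import gp_minimize
from skopt.plots import plot_evaluations

res = gp_minimize(lambda x: x[0] ** 2 + x[1] ** 2,
                  [(-2.0, 2.0), (-2.0, 2.0)], n_calls=20)
plot_evaluations(res, bins=10)
plt.show()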
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def create_result(Xi, yi, space=None, rng=None, specs=None, models=None):
    """
    Initialize an `OptimizeResult` object.

    Parameters
    ----------
    * `Xi` [list of lists, shape=(n_iters, n_features)]:
        Location of the minimum at every iteration.

    * `yi` [array-like, shape=(n_iters,)]:
        Minimum value obtained at every iteration.

    * `space` [Space instance, optional]:
        Search space.

    * `rng` [RandomState instance, optional]:
        State of the random state.

    * `specs` [dict, optional]:
        Call specifications.

    * `models` [list, optional]:
        List of fit surrogate models.

    Returns
    -------
    * `res` [`OptimizeResult`, scipy object]:
        OptimizeResult instance with the required information.
    """
    res = OptimizeResult()
    yi = np.asarray(yi)
    if np.ndim(yi) == 2:
        res.log_time = np.ravel(yi[:, 1])
        yi = np.ravel(yi[:, 0])
    best = np.argmin(yi)
    res.x = Xi[best]
    res.fun = yi[best]
    res.func_vals = yi
    res.x_iters = Xi
    res.models = models
    res.space = space
    res.random_state = rng
    res.specs = specs
    return res
Project: scikit-optimize    Author: scikit-optimize    | Project source | File source
def expected_minimum(res, n_random_starts=20, random_state=None):
    """
    Compute the minimum over the predictions of the last surrogate model.

    Note that the returned minimum may not necessarily be an accurate
    prediction of the minimum of the true objective function.

    Parameters
    ----------
    * `res`  [`OptimizeResult`, scipy object]:
        The optimization result returned by a `skopt` minimizer.

    * `n_random_starts` [int, default=20]:
        The number of random starts for the minimization of the surrogate
        model.

    * `random_state` [int, RandomState instance, or None (default)]:
        Set random state to something other than None for reproducible
        results.

    Returns
    -------
    * `x` [list]: location of the minimum.

    * `fun` [float]: the surrogate function value at the minimum.
    """
    def func(x):
        reg = res.models[-1]
        x = res.space.transform(x.reshape(1, -1))
        return reg.predict(x.reshape(1, -1))[0]

    xs = [res.x]
    if n_random_starts > 0:
        xs.extend(res.space.rvs(n_random_starts, random_state=random_state))

    best_x = None
    best_fun = np.inf

    for x0 in xs:
        r = sp_minimize(func, x0=x0, bounds=res.space.bounds)

        if r.fun < best_fun:
            best_x = r.x
            best_fun = r.fun

    return [v for v in best_x], best_fun
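
A short usage sketch; `res` must come from a model-based skopt minimizer such as `gp_minimize`, since the function relies on `res.models` (assumed import path: `skopt.utils`):

from skopt import gp_minimize
from skopt.utils import expected_minimum

res = gp_minimize(lambda x: (x[0] - 0.2) ** 2, [(-1.0, 1.0)], n_calls=15)
x_min, fun_min = expected_minimum(res, n_random_starts=20, random_state=0)
print(x_min, fun_min)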
Project: cartesian    Author: Ohjeah    | Project source | File source
def oneplus(fun, random_state=None, cls=None, lambda_=4, max_iter=100,
            max_nfev=None, f_tol=0, n_jobs=1, seed=None):
    """
    1 + lambda algorithm.
    In each generation, create lambda offspring and compare their fitness to the parent individual.
    The fittest individual carries over to the next generation. In case of a draw, the offspring is preferred.

    :param fun: `callable(individual)`, function to be optimized
    :param random_state: an instance of np.random.RandomState, a seed integer or None
    :param cls: The base class for individuals
    :type cls: (optional) instance of cartesian.cgp.Cartesian
    :param seed: (optional) can be passed instead of cls.
    :param lambda_: number of offspring per generation
    :param max_iter: maximum number of generations
    :param max_nfev: maximum number of function evaluations. Important if `fun` is itself an optimizer.
    :param f_tol: threshold for precision
    :param n_jobs: number of jobs for joblib's embarrassingly parallel execution

    :return: scipy.optimize.OptimizeResult with non-standard attributes
    res.x = values for constants
    res.expr = expression
    res.fun = best value for the function
    """
    max_iter = max_nfev if max_nfev else max_iter
    max_nfev = max_nfev or math.inf

    random_state = check_random_state(random_state)

    best = seed or cls.create(random_state=random_state)
    best_res = return_opt_result(fun, best)

    nfev = best_res.nfev
    res = OptimizeResult(expr=best, x=best_res.x, fun=best_res.fun, nit=0, nfev=nfev, success=False)

    if best_res.fun <= f_tol:
        res["success"] = True
        return res

    for i in range(1, max_iter):
        offspring = [point_mutation(best, random_state=random_state) for _ in range(lambda_)]

        # with Parallel(n_jobs=n_jobs) as parallel:
        #         offspring_fitness = parallel(delayed(return_opt_result)(fun, o) for o in offspring)
        offspring_fitness = [return_opt_result(fun, o) for o in offspring]
        best, best_res = min(zip(offspring + [best], offspring_fitness + [best_res]), key=lambda x: x[1].fun)
        nfev += sum(of.nfev for of in offspring_fitness)

        res = OptimizeResult(expr=best, x=best_res.x, fun=best_res.fun, nit=i, nfev=nfev, success=False)
        if res.fun <= f_tol:
            res["success"] = True
            return res
        elif res.nfev >= max_nfev:
            return res

    return res
Project: bolib    Author: ibaidev    | Project source | File source
def minimize(self, fun, x0, args=(), bounds=(), **unknown_options):
        """

        :param fun:
        :type fun:
        :param x0:
        :type x0:
        :param args:
        :type args:
        :param bounds:
        :type bounds:
        :param unknown_options:
        :type unknown_options:
        :return:
        :rtype:
        """
        np.random.seed(self.seed)

        # Sample part of the batch in a small box around x0, the rest
        # uniformly over the full bounds.
        x0_square_size = 1e-05      # box half-width, relative to each bound's range
        x0_square_samples = 0.1     # fraction of the batch drawn around x0
        x_t = np.vstack((
            random_sample(
                [
                    (
                        x0_i - (x0_square_size * (bounds_i[1]-bounds_i[0])),
                        x0_i + (x0_square_size * (bounds_i[1]-bounds_i[0]))
                    )
                    for x0_i, bounds_i in zip(x0, bounds)
                ],
                int(self.batch_size * x0_square_samples)
            ),
            random_sample(
                bounds,
                int(self.batch_size * (1.0-x0_square_samples))
            )
        ))

        y_t = fun(x_t)

        best = np.argmin(y_t)

        x_best = x_t[best, :][None, :]
        y_best = y_t[best, :][None, :]

        return spo.OptimizeResult(
            fun=y_best,
            x=x_best,
            nit=1,
            nfev=self.batch_size,
            success=True
        )