Python utils 模块,evaluate() 实例源码

我们从Python开源项目中,提取了以下2个代码示例,用于说明如何使用utils.evaluate()

项目:IMF    作者:SoftSec-KAIST    | 项目源码 | 文件源码
def load_apilog(self, log_fname, limit):
    """Parse an API log file into a list of ApiLog objects.

    The log is expected to hold alternating lines: one starting with
    'IN' (the evaluated input record) immediately followed by one
    starting with 'OUT' (the evaluated output record). At most *limit*
    IN/OUT pairs are consumed.

    :param log_fname: path to the log file to parse
    :param limit: maximum number of IN/OUT pairs to load
    :return: list of log.ApiLog instances
    """
    # Open in text mode: the contents are split on the str '\n' and
    # compared against str prefixes below, so binary mode ('rb') would
    # raise TypeError on Python 3 (bytes vs str).
    with open(log_fname, 'r') as f:
        data = f.read().split('\n')[:-1]
    # Entries must come in IN/OUT pairs; drop a trailing unpaired line.
    if len(data) % 2 != 0:
        data = data[:-1]
    idx = 0
    apilogs = []
    while idx < len(data) and idx < limit * 2:
        if data[idx][:2] == 'IN':
            il = utils.evaluate(data[idx][2:])
        else:
            # NOTE(review): assumes utils.error raises or exits;
            # otherwise 'il' would be unbound below — confirm.
            utils.error('load_apilog: parse IN error')

        if data[idx + 1][:3] == 'OUT':
            ol = utils.evaluate(data[idx + 1][3:])
        else:
            utils.error('load_apilog: parse OUT error')
        # il[0] is presumably the API identifier keying self.apis —
        # TODO confirm against the log format.
        apilog = log.ApiLog(self.apis[il[0]], il, ol)
        apilogs.append(apilog)
        idx += 2
    return apilogs
项目:time_series_modeling    作者:rheineke    | 项目源码 | 文件源码
def fit_evaluate(X_train, X_test, y_train, y_test, pipeline, n_min=10000):
    """Fit *pipeline* on the training data, evaluate it, and save
    residual and learning-curve plots under output/.

    :param X_train: training features (pandas DataFrame)
    :param X_test: test features (pandas DataFrame)
    :param y_train: training target, indexed like X_train
    :param y_test: test target, indexed like X_test
    :param pipeline: scikit-learn style estimator/pipeline
    :param n_min: upper bound on the number of rows sampled for the
        diagnostic plots
    """
    pipeline_nm = utils.pipeline_name(pipeline)
    print(pipeline_nm)

    # Fit model
    start_time = time.perf_counter()
    pipeline.fit(X_train, y_train)
    end_time = time.perf_counter()
    print('Time elapsed to fit: {:.1f}s'.format(end_time - start_time))

    # Evaluate model
    start_time = time.perf_counter()
    utils.evaluate(X_train, X_test, y_train, y_test, pipeline)
    end_time = time.perf_counter()
    print('Time elapsed to evaluate: {:.1f}s'.format(end_time - start_time))

    # Cap the train sample at both n_min and the available rows;
    # DataFrame.sample(n=...) raises ValueError when n exceeds len().
    # (Equals the old hard-coded 10000 whenever enough rows exist.)
    train_sample_n = min(n_min, len(X_train))
    X_sample_train = X_train.sample(n=train_sample_n)
    y_sample_train = y_train.reindex(X_sample_train.index)

    # Sample roughly 1% of the test set (at least 100 rows), bounded
    # above by n_min and by the number of rows actually available.
    test_exponent = int(math.log10(len(X_test)))
    test_sample_n = int(math.pow(10, max(test_exponent - 2, 2)))
    test_sample_n = max(test_sample_n, min(n_min, len(X_test)))
    test_sample_n = min(test_sample_n, len(X_test))
    X_sample_test = X_test.sample(n=test_sample_n)
    y_sample_test = y_test.reindex(X_sample_test.index)

    # Visually inspect residuals for goodness of fit
    res_fig = utils.plot_residuals(X_sample_train,
                                   X_sample_test,
                                   y_sample_train,
                                   y_sample_test,
                                   pipeline)
    res_fmt = 'output/residual_{}.png'
    res_fig.savefig(res_fmt.format(pipeline_nm), dpi=200)

    # Learning curve
    start_time = time.perf_counter()
    learn_fig = utils.plot_learning_curve([pipeline], X_sample_train, y_sample_train)
    lc_fmt = 'output/learning_curve_{}.png'
    learn_fig.savefig(lc_fmt.format(pipeline_nm), dpi=200)
    end_time = time.perf_counter()
    print('Time elapsed for learning curves: {:.1f}s'.format(end_time - start_time))

    # Validation curve — intentionally disabled (slow); kept for reference.
    # start_time = time.perf_counter()
    # val_fig = utils.plot_validation_curve([pipeline],
    #                                       X_train,
    #                                       y_train,
    #                                       n_jobs=1)
    # vc_fmt = 'output/validation_curve_{}.png'
    # val_fig.savefig(vc_fmt.format(pipeline_nm), dpi=200)
    # end_time = time.perf_counter()
    # print('Time elapsed for validation curves: {:.1f}s'.format(end_time - start_time))