#!/usr/bin/env python
# coding: utf-8
import datetime
from pprint import pprint, pformat
import numpy as np
import pandas as pd
from model_lr_rf import print_metrics_results, plot_multiROCs_usePipelines, get_proc_lr_rf_data
from model_lr_rf import construct_pipelines, fit_multiple_pipelines
from utils.landslide_utils import load_df_fromfile
from gwlsa_settings import net_params
from utils import general_utils
from utils.general_utils import timer
from utils.landslide_utils import save_predY_toCSV
import gwlsa_settings as GS

@timer
def run_many_times_remove_some_factors(n=10, cls_weight=None):
    """Repeat training ``n`` times for every factor-removal setting in
    ``GS.DROP_LIST`` and report averaged test metrics per model.

    For each drop setting the pipelines ('wlr', 'wrf') are re-built and
    re-fitted ``n`` times; AUC, F1, recall, AIC and R2 on the test set are
    averaged over the loops. The summary is pretty-printed and also written
    to a timestamped text file.

    Parameters
    ----------
    n : int
        Number of fit/evaluate repetitions per drop setting.
    cls_weight : dict | None
        Class-weight mapping ``{class_label: weight}``; defaults to
        ``{0: 1, 1: 1}`` (kept as ``None`` sentinel to avoid a mutable
        default argument).
    """
    if cls_weight is None:
        cls_weight = {0: 1, 1: 1}
    model_name_list = ('wlr', 'wrf')
    metric_names = ('auc', 'f1', 'recall', 'AIC', 'r2')
    metric_results = {model_name: {} for model_name in model_name_list}
    for drop_column in GS.DROP_LIST:
        x_train_con, y_train_r_con, geoId_train_r_con, x_test, y_test_r, geoId_test_r = \
            get_proc_lr_rf_data(drop_column=drop_column)
        if isinstance(drop_column, list):
            drop_column_str = ",".join(drop_column)
        elif isinstance(drop_column, str):
            drop_column_str = drop_column
        else:
            drop_column_str = ''
        # Running sums of each model's metrics for the current drop setting
        # (temporary storage, reset for every setting).
        temp_sum_dict = {m: {k: 0 for k in metric_names} for m in model_name_list}
        for i in range(n):
            print(f'--remove {drop_column_str},---Current loop {i + 1}---------------------------:')
            pipeline_list = construct_pipelines(cls_weight, model_list=model_name_list)
            # Fit every model pipeline on the training data.
            fit_multiple_pipelines(pipeline_list, model_name_list, x_train_con, y_train_r_con)
            print('-' * 50)
            # Evaluate prediction accuracy on the TEST set.
            for pipeline, model_name in zip(pipeline_list, model_name_list):
                # whether_return_AIC=True keeps this call consistent with
                # run_for_different_cls_weights, which reads 'aic' the same way.
                metrics_dict = print_metrics_results(pipeline, x_test, y_test_r, model_name,
                                                     print_results=True, whether_return_AIC=True)
                sums = temp_sum_dict[model_name]
                sums['auc'] += metrics_dict['auc']
                sums['f1'] += metrics_dict['f1']
                sums['recall'] += metrics_dict['recall']
                sums['AIC'] += metrics_dict['aic']
                sums['r2'] += metrics_dict['r2']
        if drop_column_str == '':
            drop_column_str = 'None'
        # The key is a short display label for the setting (e.g. base, A, A*, ...).
        metric_key = GS.DROP_LIST_DICT[drop_column_str]
        for model_name in model_name_list:
            ave = {k: temp_sum_dict[model_name][k] / n for k in metric_names}
            metric_results[model_name][metric_key] = (
                f"ave-AUC: {ave['auc']:.3f}, ave-f1: {ave['f1']:.3f}, "
                f"ave-recall: {ave['recall']:.3f}, ave-AIC:{ave['AIC']:.3f}, ave-R2: {ave['r2']:.3f}"
            )
    # sort_dicts=False keeps insertion order in the printed summary.
    print('metric_results summary:')
    pprint(metric_results, width=100, sort_dicts=False)
    # Write the summary to a timestamped file; `with` guarantees the handle
    # is closed even if the write raises.
    weights_str = f"weights_1to{cls_weight[1]}"
    d_str = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    with open(f'paper秭归LR_RF(removeFactors{d_str})_{weights_str}.txt', 'w', encoding='utf-8') as file:
        file.write(pformat(metric_results, width=100, sort_dicts=False))

@timer
def run_for_different_cls_weights(n=1, min_weight=1, max_weight=15,
                                  save_test_pred_results=False,
                                  save_all_pred_results=False,
                                  save_fprTpr_to_npy=False,
                                  show_roc=False):
    """Fit and evaluate every model for each positive-class weight in
    ``[min_weight, max_weight]``, repeating the whole sweep ``n`` times.

    Parameters
    ----------
    n : int
        Number of repetitions of the weight sweep.
    min_weight, max_weight : int
        Inclusive range of weights tried for class 1 (class 0 stays at 1).
    save_test_pred_results : bool
        If True, write every model's test-set predictions to CSV.
    save_all_pred_results : bool
        If True, write predictions over the whole dataset (train+val+test) to CSV.
    save_fprTpr_to_npy : bool
        If True, save the 'wrf' model's (fpr, tpr) arrays as .npy files.
    show_roc : bool
        Passed through to ``plot_multiROCs_usePipelines``.

    Returns
    -------
    tuple
        ``(pipeline_list, model_name_list, (x_train_con, y_train_r_con),
        (x_test, y_test_r, geoId_test_r))`` where ``pipeline_list`` holds the
        pipelines fitted in the final loop/weight iteration.
    """
    # model_name_list = ['wlr', 'wrf', 'bwrf', 'wlgb']
    model_name_list = ('wlr', 'wrf')

    # Metrics summary per model, keyed by a class-weight label string.
    different_weights_metrics = {m: {} for m in model_name_list}

    # --- Data preparation ---
    train_df, val_df, test_df = load_df_fromfile(net_params['data_load_dir'], net_params['max_distance'],
                                                 net_params['resolution'], file_format=net_params['data_load_format'])
    train_val_df = pd.concat([train_df, val_df], ignore_index=False)
    all_df = pd.concat([train_val_df, test_df], ignore_index=False)
    X_all, y_all = all_df[net_params['x_column_names']].values, all_df[net_params['y_column_name']].values
    GeoID = all_df[net_params['id_column']].values
    # The concatenation of train_df and val_df is used as the training set.
    x_train_con = train_val_df[net_params['x_column_names']].values
    y_train_r_con = train_val_df[net_params['y_column_name']].values
    x_test, y_test_r = test_df[net_params['x_column_names']].values, test_df[net_params['y_column_name']].values
    # BUGFIX: previously read net_params['y_column_name'] here (copy-paste),
    # which filled the GeoID column of saved test CSVs with labels instead of
    # geographic IDs; use the id column as done for `GeoID` above.
    geoId_test_r = test_df[net_params['id_column']].values
    for i in range(n):
        for w in range(min_weight, max_weight + 1):
            cls_weight = {0: 1, 1: w}  # current class weight
            str_cls_weight = f'cls_weight 1:{w}'
            weights_str = f'weights_1to{w}'
            print(f'start..... {str_cls_weight}, current loop: {i+1}')

            # NOTE: pipeline_list and model_name_list must stay strictly
            # aligned in order and length.
            pipeline_list = construct_pipelines(cls_weight, model_list=model_name_list)

            # Fit every pipeline on the training data.
            fit_multiple_pipelines(pipeline_list, model_name_list, x_train_con, y_train_r_con)

            print('-' * 50)

            # Evaluate prediction accuracy on the TEST set.
            for pipeline, model_name in zip(pipeline_list, model_name_list):
                d = print_metrics_results(pipeline, x_test, y_test_r, model_name, print_results=True, whether_return_AIC=True)
                different_weights_metrics[model_name][str_cls_weight] = (
                    f"auc:{d['auc']:.3f}, f1-score: {d['f1']:.3f}, recall: {d['recall']:.3f}, "
                    f"AIC: {d['aic']:.3f}, R2: {d['r2']:.3f}"
                )
            # Plot ROC curves and collect each model's (fpr, tpr) data.
            fpr_tpr_dicts = plot_multiROCs_usePipelines(pipeline_list, model_name_list, x_test, y_test_r, show_roc=show_roc)

            # Optionally save fpr/tpr (TEST set — the data whose accuracy we
            # assess) as .npy files; only the 'wrf' model is persisted.
            if save_fprTpr_to_npy:
                fprtpr_saved_dir = GS.FPR_TPR_DIR
                for model_name in model_name_list:
                    if model_name == 'wrf':
                        cur_arr = np.array(fpr_tpr_dicts[model_name])
                        np.save(f'{fprtpr_saved_dir}/fprTpr_{model_name}_{weights_str}_test_loop{i+1}.npy', cur_arr)

            predicted_csv_dir = GS.PREDICTED_Y_DIR
            # Optionally predict and save to file [TEST set].
            if save_test_pred_results:
                for cur_pipeline, model_name in zip(pipeline_list, model_name_list):
                    save_predY_toCSV(x_test, cur_pipeline, save_csv_filename=f'{predicted_csv_dir}/pred_{model_name}_{weights_str}_test_loop{i + 1}.csv',
                                     GeoID=geoId_test_r, model_type='rf_lr')
            # Optionally predict and save to file [WHOLE dataset].
            if save_all_pred_results:
                for cur_pipeline, model_name in zip(pipeline_list, model_name_list):
                    y_pred = cur_pipeline.predict(X_all)
                    y_pred_prob = cur_pipeline.predict_proba(X_all)[:, 1]
                    save_csv_filename = f'{predicted_csv_dir}/{model_name}_{weights_str}_all_loop{i + 1}.csv'
                    general_utils.save_results(GeoID, y_pred, y_pred_prob, save_csv_filename)
        print(f'----------Average results summary------------：')
        pprint(different_weights_metrics, width=200, sort_dicts=False)

    return pipeline_list, model_name_list, (x_train_con, y_train_r_con), (x_test, y_test_r, geoId_test_r)

if __name__ == '__main__':
    # Ensure the output folders configured in settings exist; create any
    # that are missing before running.
    general_utils.check_data_settings_folder()

    run_for_different_cls_weights(n=1, min_weight=1, max_weight=22,
                                  save_test_pred_results=False,
                                  save_all_pred_results=True,
                                  save_fprTpr_to_npy=True,
                                  show_roc=False)
    # run_many_times_remove_some_factors(n=10, cls_weight={0: 1, 1: 4})
    print('Done!')