#!/usr/bin/env python
# coding: utf-8
from pprint import pprint, pformat
import datetime
import numpy as np
from sklearn import metrics
from utils.general_utils import timer
from model_dnn import run_once_dnn
import gwlsa_settings as GS
from utils import general_utils
from utils.landslide_utils import save_predY_toCSV, predict_class_prob, get_proc_nn_data

@timer
def run_many_times_remove_some_factors(n=10, nb_max_epochs=500, cls_weight=None, bat_size=1000, flatten_neighbours=False):
    """Run the DNN n times for each factor-removal setting in GS.DROP_LIST
    and collect averaged metrics (AUC, F1, recall, AIC, R2) per setting.

    Args:
        n: number of repeated training runs per dropped factor (results averaged).
        nb_max_epochs: maximum training epochs forwarded to run_once_dnn.
        cls_weight: class-weight dict {0: w0, 1: w1}; defaults to {0: 1, 1: 1}.
        bat_size: training/prediction batch size.
        flatten_neighbours: forwarded to get_proc_nn_data.

    Side effects: prints a summary dict and writes it to a timestamped
    'paper秭归DNN(removeFactors...)' text file in the working directory.
    """
    # Avoid a shared mutable default argument; build the default per call.
    if cls_weight is None:
        cls_weight = {0: 1, 1: 1}
    metric_results = {}
    for drop_column in GS.DROP_LIST:
        # Reload the processed data with the current factor(s) dropped.
        (x_all, y_onehot, geoId_r), (x_train, y_train_onehot, geoId_train_r), (x_val, y_val_onehot, geoId_val_r), \
            (x_test, y_test_onehot, geoId_test_r), y_test_r, channels, nb_features = get_proc_nn_data(model_type='dnn',
                                                                                                      load_from_csv=False,
                                                                                                      flatten_neighbours=flatten_neighbours,
                                                                                                      drop_column=drop_column,
                                                                                                      whether_return_GeoId=True)
        # drop_column may be a list of column names or a single name.
        drop_column_str = ''
        if isinstance(drop_column, list):
            drop_column_str = ",".join(drop_column)
        elif isinstance(drop_column, str):
            drop_column_str = drop_column
        nb_mid_layers = 0
        f1_sum = 0.0
        auc_sum = 0.0
        recall_sum = 0.0
        aic_sum = 0.0
        r2_sum = 0.0
        for i in range(n):
            print(f'--remove {drop_column_str},---Current loop {i + 1}---------------------------:')
            d, model, hist = run_once_dnn(x_train=x_train, y_train_onehot=y_train_onehot,
                                          x_test=x_test, y_test_onehot=y_test_onehot,
                                          y_test_not_onehot=y_test_r,
                                          channels=channels,
                                          nb_epochs=nb_max_epochs,
                                          nb_mid_layers=nb_mid_layers,
                                          cls_weight=cls_weight,
                                          nb_pca_or_nb_features=nb_features,
                                          verbose=0,
                                          node_num=8,
                                          lr_rate=0.001,
                                          bat_size=bat_size,
                                          val_split=0.2,
                                          val_data=(x_val, y_val_onehot))
            auc_sum = auc_sum + d['auc']
            f1_sum = f1_sum + d['f1']
            recall_sum = recall_sum + d['recall']
            aic_sum = aic_sum + d['aic']
            r2_sum = r2_sum + d['r2']
        if drop_column_str == '':
            drop_column_str = 'None'
        # The key is mainly a display label, e.g. base, A, A*, ...
        metric_key = GS.DROP_LIST_DICT[drop_column_str]
        metric_results[
            metric_key] = f'ave-AUC: {auc_sum / n:.3f}, ave-f1: {f1_sum / n:.3f}, ave-recall: {recall_sum / n:.3f}, ave AIC: {aic_sum / n:.3f}, ave R2: {r2_sum / n:.3f} '
    print(str(cls_weight), ', metric_results summary:')
    pprint(metric_results, width=100, sort_dicts=False)
    # Write the summary to a file; `with` guarantees the handle is closed
    # even if the write raises.
    csl_weight_str = f"weights_{cls_weight[0]}to{cls_weight[1]}"
    d_str = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    with open(f'paper秭归DNN(removeFactors{d_str})_{csl_weight_str}.txt', 'w', encoding='utf-8') as file:
        file.write(pformat(metric_results, width=100, sort_dicts=False))

###### ------- Average accuracy over n runs for each class weight ratio -------
@timer
def run_many_times_for_different_cls_weights(n=10, nb_max_epochs=500, max_weight=15, drop_column=None, bat_size=1000, flatten_neighbours=False):
    """Sweep the positive-class weight from 1:1 up to 1:max_weight, running
    the DNN n times per weight and printing the averaged metrics.

    Args:
        n: number of repeated runs per class weight (results averaged).
        nb_max_epochs: maximum training epochs forwarded to run_once_dnn.
        max_weight: largest positive-class weight tried (inclusive).
        drop_column: factor column(s) to drop, forwarded to get_proc_nn_data.
        bat_size: training batch size.
        flatten_neighbours: forwarded to get_proc_nn_data.

    Side effects: prints per-run metrics and a final averaged summary dict.
    """
    nb_mid_layers = 0
    # Result store, e.g. different_weights_metrics['cls_weight 1:5'] = 'ave-AUC: ...'
    different_weights_metrics = {}
    (x_all, y_onehot, geoId_r), (x_train, y_train_onehot, geoId_train_r), (x_val, y_val_onehot, geoId_val_r), \
        (x_test, y_test_onehot, geoId_test_r), y_test_r, channels, nb_features = get_proc_nn_data(model_type='dnn',
                                                                                                      load_from_csv=False,
                                                                                                      flatten_neighbours=flatten_neighbours,
                                                                                                      drop_column=drop_column,
                                                                                                      whether_return_GeoId=True)
    for w in range(1, max_weight + 1):
        cls_weight = {0: 1, 1: w}  # current class weight
        str_cls_weight = f'cls_weight 1:{w}'
        different_weights_metrics[str_cls_weight] = {}
        nb_epochs = nb_max_epochs
        f1_sum = 0.0
        auc_sum = 0.0
        recall_sum = 0.0
        aic_sum = 0.0
        r2_sum = 0.0
        # Run n times with the current weight and average the metrics.
        for i in range(n):
            print(f'----cls_weight: {str_cls_weight}, ----Current loop {i + 1}---------------------------:')
            d, model, hist = run_once_dnn(x_train=x_train, y_train_onehot=y_train_onehot,
                                          x_test=x_test, y_test_onehot=y_test_onehot,
                                          y_test_not_onehot=y_test_r,
                                          channels=channels,
                                          nb_epochs=nb_epochs,
                                          nb_mid_layers=nb_mid_layers,
                                          cls_weight=cls_weight,
                                          nb_pca_or_nb_features=nb_features,
                                          verbose=0,
                                          node_num=8,
                                          lr_rate=0.001,
                                          bat_size=bat_size,
                                          val_split=0.2,
                                          val_data=(x_val, y_val_onehot))
            # BUG FIX: the two adjacent f-strings had no separator, so the
            # output ran together as "Recall: 0.123AIC: ...".
            print(f"AUC: {d['auc']:.3f}, F1-score: {d['f1']:.3f}, Recall: {d['recall']:.3f}, "
                  f"AIC: {d['aic']:.3f}, R2: {d['r2']:.3f}")
            auc_sum = auc_sum + d['auc']
            f1_sum = f1_sum + d['f1']
            recall_sum = recall_sum + d['recall']
            aic_sum = aic_sum + d['aic']
            r2_sum = r2_sum + d['r2']
        # Average of the n runs under the current weight.
        different_weights_metrics[
            str_cls_weight] = f'ave-AUC: {auc_sum / n:.3f}, ave-f1: {f1_sum / n:.3f}, ave-recall: {recall_sum / n:.3f}, ave AIC: {aic_sum / n:.3f}, ave R2: {r2_sum / n:.3f}'
    print(f'----------Average results summary------------：')
    pprint(different_weights_metrics, sort_dicts=False)


###### ------- Run with the given parameters n times and average the metrics -------
@timer
def run_many_times(n=10, nb_max_epochs=500, cls_weight=None,
                   bat_size=1000,
                   save_test_pred_results=False,
                   save_all_pred_results=False,
                   save_fprTpr_to_npy=False,
                   save_trained_model=False,
                   drop_column=None,
                   node_num=8,
                   nb_mid_layers=0,
                   use_early_stop=True,
                   flatten_neighbours=False):
    """Train the DNN n times with fixed hyperparameters, optionally saving
    models, ROC data and predictions, and return the averaged-metrics string.

    Args:
        n: number of repeated training runs (results averaged).
        nb_max_epochs: maximum training epochs forwarded to run_once_dnn.
        cls_weight: class-weight dict {0: w0, 1: w1}; defaults to {0: 1, 1: 1}.
        bat_size: training/prediction batch size.
        save_test_pred_results: write per-loop test-set predictions to CSV.
        save_all_pred_results: write per-loop whole-dataset predictions to CSV.
        save_fprTpr_to_npy: save per-loop (fpr, tpr) arrays for later ROC plots.
        save_trained_model: save each trained Keras model as .h5.
        drop_column: factor column(s) to drop, forwarded to get_proc_nn_data.
        node_num: nodes per layer forwarded to run_once_dnn.
        nb_mid_layers: number of middle layers forwarded to run_once_dnn.
        use_early_stop: forwarded to run_once_dnn.
        flatten_neighbours: forwarded to get_proc_nn_data.

    Returns:
        str: one-line summary of the averaged AUC/F1/recall/AIC/R2.
    """
    # Avoid a shared mutable default argument; build the default per call.
    if cls_weight is None:
        cls_weight = {0: 1, 1: 1}
    results_lst = []
    f1_sum = 0.0
    auc_sum = 0.0
    recall_sum = 0.0
    aic_sum = 0.0
    r2_sum = 0.0
    str_weights = f"weights_{cls_weight[0]}to{cls_weight[1]}"
    # Load the processed data once; every loop reuses the same split.
    (x_all, y_onehot, geoId_r), (x_train, y_train_onehot, geoId_train_r), (x_val, y_val_onehot, geoId_val_r), \
        (x_test, y_test_onehot, geoId_test_r), y_test_r, channels, nb_features = get_proc_nn_data(model_type='dnn',
                                                                                                  load_from_csv=False,
                                                                                                  flatten_neighbours=flatten_neighbours,
                                                                                                  drop_column=drop_column,
                                                                                                  whether_return_GeoId=True)
    # Loop-invariant: the epoch budget never changes between loops.
    nb_epochs = nb_max_epochs
    for i in range(n):
        print(f'-------------------Current loop {i + 1}---------------------------:')
        d, model, hist = run_once_dnn(x_train=x_train, y_train_onehot=y_train_onehot,
                                      x_test=x_test, y_test_onehot=y_test_onehot,
                                      y_test_not_onehot=y_test_r,
                                      channels=channels,
                                      nb_epochs=nb_epochs,
                                      nb_mid_layers=nb_mid_layers,
                                      cls_weight=cls_weight,
                                      nb_pca_or_nb_features=nb_features,
                                      verbose=0,
                                      node_num=node_num,
                                      lr_rate=0.001,
                                      bat_size=bat_size,
                                      val_split=0.2,
                                      val_data=(x_val, y_val_onehot),
                                      use_early_stop=use_early_stop)
        # Save the trained model of this loop.
        if save_trained_model:
            saved_model_dir = GS.SAVED_MODEL_DIR
            model_save_filepath = f'{saved_model_dir}/model_{str_weights}_dnn_loop{i + 1}.h5'
            model.save(model_save_filepath)
        # Save fpr/tpr so a ROC curve can be plotted later.
        if save_fprTpr_to_npy:
            fprtpr_saved_dir = GS.FPR_TPR_DIR
            # Persist (fpr, tpr) to .npy for later loading and plotting.
            _, y_pred_proba = predict_class_prob(model, x_test, bat_size=bat_size, model_type='keras_dnn_lstm')
            fpr, tpr, thresholds = metrics.roc_curve(y_test_r, y_pred_proba)
            cur_arr = np.array((fpr, tpr))
            np.save(f'{fprtpr_saved_dir}/fprTpr_{str_weights}_dnn_test_loop{i + 1}.npy', cur_arr)
        predicted_csv_dir = GS.PREDICTED_Y_DIR
        # Predict and save to file [test dataset].
        if save_test_pred_results:
            save_predY_toCSV(x_test, model, f'{predicted_csv_dir}/results_{str_weights}_dnn_test_loop{i + 1}.csv',
                             geoId_test_r, bat_size=bat_size)
        # Predict and save to file [whole dataset].
        if save_all_pred_results:
            save_predY_toCSV(x_all, model, f'{predicted_csv_dir}/results_{str_weights}_dnn_all_loop{i + 1}.csv',
                             geoId_r, bat_size=bat_size)

        performance_str = (
            f"loop: {i + 1}, nb_mid_layers: {nb_mid_layers}, AUC: {d['auc']:.3f}, F1-score: {d['f1']:.3f}, Recall: {d['recall']:.3f}, "
            f"AIC: {d['aic']:.3f}, R2: {d['r2']:.3f}")
        results_lst.append(performance_str)

        auc_sum = auc_sum + d['auc']
        f1_sum = f1_sum + d['f1']
        recall_sum = recall_sum + d['recall']
        aic_sum = aic_sum + d['aic']
        r2_sum = r2_sum + d['r2']
        # plotLoss(hist)  # plot the loss curve
    pprint(results_lst, sort_dicts=False, width=200)
    print(f'-------------Average results of {n} times:-------------')
    ave_str = f'ave AUC: {auc_sum / n:.3f}, ave f1: {f1_sum / n:.3f}, ave recall: {recall_sum / n:.3f}, ave AIC: {aic_sum / n:.3f}, ave R2: {r2_sum / n:.3f}'
    print(ave_str)
    return ave_str


if __name__ == '__main__':
    # Make sure every result folder from the settings exists (create if missing).
    general_utils.check_data_settings_folder()

    # run_many_times_for_different_cls_weights(n=10, nb_max_epochs=500, max_weight=1)
    # print('---------------------- start with weight ratio 1:1 -------------------')
    performance_list = []
    for weight in range(0, 1):
        summary = run_many_times(
            n=1,
            nb_max_epochs=5,
            cls_weight={0: 1, 1: weight + 1},
            save_test_pred_results=False,
            save_all_pred_results=False,
            save_fprTpr_to_npy=False,
            save_trained_model=False,
            node_num=480,
            nb_mid_layers=0,
            use_early_stop=False,
            bat_size=1000,
            flatten_neighbours=False,
        )
        performance_list.append(summary)
    pprint(performance_list, width=200, sort_dicts=False)

    # print('---------------------- start with weight ratio 1:4 -------------------')
    # run_many_times(n=10, nb_max_epochs=500,
    #                cls_weight={0: 1, 1: 4},
    #                save_test_pred_results=False,
    #                save_all_pred_results=False,
    #                save_fprTpr_to_npy=False,
    #                save_trained_model=False,
    #                node_num=480,
    #                nb_mid_layers=0,
    #                flatten_neighbours=False
    #                )
    # run_many_times_remove_some_factors(n=10, nb_max_epochs=500,
    #                                    cls_weight={0: 1, 1: 4})