#!/usr/bin/env python
# coding: utf-8
# 利用Keras和LSTM作滑坡敏感性制图
from pprint import pprint, pformat
import datetime
import numpy as np
from sklearn import metrics
from model_lstm import run_once_lstm
import gwlsa_settings as GS
from utils import general_utils
from utils.general_utils import timer
from utils.landslide_utils import save_predY_toCSV, predict_class_prob, get_proc_nn_data


###### ----- Grid-search the best node count and hidden-layer count -----
@timer
def grid_search_node_and_layers(n=5, nb_max_epochs=200, cls_weight=None,
                                node_nums_to_try=None, nb_mid_layers_to_try=None):
    """Grid-search LSTM hyper-parameters over (node_num, nb_mid_layers).

    Each combination is trained ``n`` times and the metrics (AUC, F1,
    recall, AIC, R2) are averaged. Results are printed sorted by average
    F1 (descending) and written to a timestamped text file.

    Args:
        n: number of repeated runs per parameter combination.
        nb_max_epochs: max training epochs for each run.
        cls_weight: class-weight dict for training; defaults to {0: 1, 1: 4}.
            (``None`` sentinel avoids a shared mutable default argument.)
        node_nums_to_try: candidate node counts; defaults to [10, 20, 30, 50].
        nb_mid_layers_to_try: candidate hidden-layer counts; defaults to [2, 3, 4, 5].

    Returns:
        dict mapping "nodes_<k>_layers_<m>" -> dict of averaged metrics.
    """
    # Never use mutable default arguments -- they are shared across calls.
    if cls_weight is None:
        cls_weight = {0: 1, 1: 4}
    if node_nums_to_try is None:
        node_nums_to_try = [10, 20, 30, 50]
    if nb_mid_layers_to_try is None:
        nb_mid_layers_to_try = [2, 3, 4, 5]

    # 1. Prepare the data once (avoid repeating the work per combination).
    (x_all, y_onehot), (x_train, y_train_onehot), (x_val, y_val_onehot), \
        (x_test, y_test_onehot), y_test_r, channels, nb_features = get_proc_nn_data(
            model_type='lstm', drop_column=None, whether_return_GeoId=False)

    # 2. Averaged metrics for every combination are collected here.
    results_dict = {}

    # 3. Try every (node_num, nb_mid_layers) combination.
    for node_num in node_nums_to_try:
        for nb_mid_layers in nb_mid_layers_to_try:
            print(f"\n=== 开始网格搜索：node_num={node_num}, nb_mid_layers={nb_mid_layers} ===\n")

            # Metric accumulators for the current combination.
            auc_sum, f1_sum, recall_sum, aic_sum, r2_sum = 0.0, 0.0, 0.0, 0.0, 0.0

            # 4. Run the current combination n times and average the metrics.
            for i in range(n):
                print(f'--- 当前组合循环 {i + 1}/{n} ---')
                d, model, hist = run_once_lstm(
                    x_train=x_train,
                    y_train_onehot=y_train_onehot,
                    x_test=x_test,
                    y_test_onehot=y_test_onehot,
                    y_test_not_onehot=y_test_r,
                    channels=channels,
                    nb_epochs=nb_max_epochs,
                    nb_mid_layers=nb_mid_layers,  # layer count under test
                    cls_weight=cls_weight,
                    nb_pca_or_nb_features=nb_features,
                    verbose=0,
                    node_num=node_num,  # node count under test
                    lr_rate=0.001,
                    bat_size=10000,
                    val_split=0.2,
                    val_data=(x_val, y_val_onehot)
                )
                # Accumulate this run's metrics.
                auc_sum += d['auc']
                f1_sum += d['f1']
                recall_sum += d['recall']
                aic_sum += d['aic']
                r2_sum += d['r2']

            # 5. Average performance of the current combination.
            combo_key = f"nodes_{node_num}_layers_{nb_mid_layers}"
            results_dict[combo_key] = {
                'ave_auc': auc_sum / n,
                'ave_f1': f1_sum / n,
                'ave_recall': recall_sum / n,
                'ave_aic': aic_sum / n,
                'ave_r2': r2_sum / n
            }
            # Print the averaged result for this combination.
            print(f"\n*** 组合 {combo_key} 的平均结果 ***")
            print(
                f"AUC: {results_dict[combo_key]['ave_auc']:.3f}, F1: {results_dict[combo_key]['ave_f1']:.3f}, Recall: {results_dict[combo_key]['ave_recall']:.3f}")

    # 6. Search finished -- print a summary of all results.
    print("\n" + "=" * 50)
    print("网格搜索最终结果汇总:")
    print("=" * 50)
    # Sort by average F1, highest first. Loop variable renamed from `metrics`
    # to avoid shadowing the module-level `sklearn.metrics` import.
    sorted_results = sorted(results_dict.items(), key=lambda x: x[1]['ave_f1'], reverse=True)
    for combo, combo_metrics in sorted_results:
        print(
            f"{combo:20} | AUC: {combo_metrics['ave_auc']:.3f} | F1: {combo_metrics['ave_f1']:.3f} | Recall: {combo_metrics['ave_recall']:.3f} | AIC: {combo_metrics['ave_aic']:.3f} | R2: {combo_metrics['ave_r2']:.3f}")

    # 7. Save the sorted results to a timestamped file.
    d_str = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    filename = f'grid_search_results_lstm_{d_str}.txt'
    with open(filename, 'w', encoding='utf-8') as f:
        f.write("网格搜索 (node_num, nb_mid_layers) 结果:\n")
        # BUG FIX: header previously claimed "sorted by AUC", but the results
        # are sorted by F1 (see sorted_results above).
        f.write("排序依据: F1 (降序)\n")
        f.write("=" * 80 + "\n")
        for combo, combo_metrics in sorted_results:
            f.write(
                f"{combo:20} | AUC: {combo_metrics['ave_auc']:.3f} | F1: {combo_metrics['ave_f1']:.3f} | Recall: {combo_metrics['ave_recall']:.3f} | AIC: {combo_metrics['ave_aic']:.3f} | R2: {combo_metrics['ave_r2']:.3f}\n")
    # BUG FIX: the original printed the literal text "(unknown)" here instead
    # of the actual results filename.
    print(f"\n详细结果已保存到文件: {filename}")

    return results_dict

@timer
def run_many_times_drop_columns(n=10, nb_max_epochs=500, cls_weight=None, flatten_neighbours=False):
    """Ablation study: retrain repeatedly while dropping factor columns.

    For every entry in ``GS.DROP_LIST`` the model is trained ``n`` times
    with that column (or list of columns) removed, and the averaged metrics
    are stored under the display key from ``GS.DROP_LIST_DICT``. The summary
    is printed and written to a timestamped text file.

    Args:
        n: number of repeated runs per dropped column.
        nb_max_epochs: max training epochs for each run.
        cls_weight: class-weight dict; defaults to {0: 1, 1: 1}.
            (``None`` sentinel avoids a shared mutable default argument.)
        flatten_neighbours: forwarded to get_proc_nn_data.
    """
    if cls_weight is None:
        cls_weight = {0: 1, 1: 1}
    metric_results = {}
    for drop_column in GS.DROP_LIST:
        # NOTE(review): this flat 11-value unpacking differs from the nested
        # unpacking used elsewhere in this file for the same
        # whether_return_GeoId=False call -- confirm get_proc_nn_data's
        # return shape.
        x_all, y_onehot, x_train, y_train_onehot, x_val, y_val_onehot, x_test, y_test_onehot, y_test_r, \
        channels, nb_features = get_proc_nn_data(model_type='lstm',
                                                 drop_column=drop_column,
                                                 whether_return_GeoId=False,
                                                 flatten_neighbours=flatten_neighbours)
        # Human-readable label for the dropped column(s).
        drop_column_str = ''
        if isinstance(drop_column, list):
            drop_column_str = ",".join(drop_column)
        elif isinstance(drop_column, str):
            drop_column_str = drop_column
        # Metric accumulators for the current drop configuration.
        f1_sum = 0.0
        auc_sum = 0.0
        recall_sum = 0.0
        # BUG FIX: aic_sum and r2_sum were never initialised in the original,
        # raising NameError on the first `aic_sum = aic_sum + ...` below.
        aic_sum = 0.0
        r2_sum = 0.0
        for i in range(n):
            print(f'--remove {drop_column_str},---Current loop {i + 1}---------------------------:')
            d, model, hist = run_once_lstm(x_train=x_train, y_train_onehot=y_train_onehot,
                                                          x_test=x_test, y_test_onehot=y_test_onehot,
                                                          y_test_not_onehot=y_test_r,
                                                          channels=channels,
                                                          nb_epochs=nb_max_epochs,
                                                          nb_mid_layers=4,
                                                          cls_weight=cls_weight,
                                                          nb_pca_or_nb_features=nb_features,
                                                          verbose=0,
                                                          node_num=20,
                                                          lr_rate=0.001,
                                                          bat_size=10000,
                                                          val_split=0.2,
                                                          val_data=(x_val, y_val_onehot))
            auc_sum = auc_sum + d['auc']
            f1_sum = f1_sum + d['f1']
            recall_sum = recall_sum + d['recall']
            aic_sum = aic_sum + d['aic']
            r2_sum = r2_sum + d['r2']
        if drop_column_str == '':
            drop_column_str = 'None'
        # The key is a display label, e.g. base, A, A*, ...
        metric_key = GS.DROP_LIST_DICT[drop_column_str]
        metric_results[metric_key] = f'ave-AUC: {auc_sum / n:.3f}, ave-f1: {f1_sum / n:.3f}, ave-recall: {recall_sum / n:.3f}, ave AIC: {aic_sum/n:.3f}, ave R2: {r2_sum/n:.3f} '
    print(str(cls_weight), ', metric_results summary:')
    pprint(metric_results, width=100, sort_dicts=False)
    # Write the summary to a file; `with` guarantees the handle is closed
    # even if the write raises (the original used open/close manually).
    csl_weight_str = f"weights_{cls_weight[0]}to{cls_weight[1]}"
    d_str = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    with open(f'paper秭归LSTM(removeFactors{d_str})_{csl_weight_str}.txt', 'w', encoding='utf-8') as file:
        file.write(pformat(metric_results, width=100, sort_dicts=False))

###### ----- Accuracy change under different cls_weight: run n times per weight and average -----
@timer
def run_many_times_class_weights(n=10, nb_max_epochs=500, max_weight=20,
                                 node_num=20, nb_mid_layers=4, flatten_neighbours=False):
    """Sweep the positive-class weight from 1:1 up to 1:max_weight.

    For each weight ratio the model is trained ``n`` times and the averaged
    metrics (AUC, F1, recall, AIC, R2) are printed as a summary dict.

    Args:
        n: number of repeated runs per weight.
        nb_max_epochs: max training epochs for each run.
        max_weight: sweep the positive-class weight over 1..max_weight.
        node_num: LSTM node count passed to run_once_lstm.
        nb_mid_layers: hidden-layer count passed to run_once_lstm.
        flatten_neighbours: forwarded to get_proc_nn_data.
    """
    (x_all, y_onehot, geoId_r), (x_train, y_train_onehot, geoId_train_r), (x_val, y_val_onehot, geoId_val_r), \
        (x_test, y_test_onehot, geoId_test_r), y_test_r, channels, nb_features = get_proc_nn_data(model_type='lstm',
                                                                                                  drop_column=None,
                                                                                                  whether_return_GeoId=True,
                                                                                                  flatten_neighbours=flatten_neighbours)
    # Result dict, e.g. different_weights_metrics['cls_weight 1:5'] = '<summary string>'
    different_weights_metrics = {}

    for w in range(1, max_weight + 1):
        cls_weight = {0: 1, 1: w}  # current weight ratio
        str_cls_weight = f'cls_weight 1:{w}'
        # (Removed a dead assignment: the original pre-set this key to {} and
        # then unconditionally overwrote it with the summary string below.)
        f1_sum = 0.0
        auc_sum = 0.0
        recall_sum = 0.0
        aic_sum = 0.0
        r2_sum = 0.0
        # Run n times under the current weight and average the metrics.
        for i in range(n):
            print(f'----{str_cls_weight}, ----Current loop {i+1}---------------------------:')
            d, model, hist = run_once_lstm(x_train=x_train, y_train_onehot=y_train_onehot,
                                                          x_test=x_test, y_test_onehot=y_test_onehot,
                                                          y_test_not_onehot=y_test_r,
                                                          channels=channels,
                                                          nb_epochs=nb_max_epochs,
                                                          nb_mid_layers=nb_mid_layers,
                                                          cls_weight=cls_weight,
                                                          nb_pca_or_nb_features=nb_features,
                                                          verbose=0,
                                                          node_num=node_num,
                                                          lr_rate=0.001,
                                                          bat_size=10000,
                                                          val_split=0.2,
                                                          val_data=(x_val, y_val_onehot))
            # predict_allX_saveCSV(x_test, model, './results/results_lstm_0917.csv', geoId_test_df.values, bat_size=1000)
            print(f"AUC: {d['auc']:.3f}, F1-score: {d['f1']:.3f}, Recall: {d['recall']:.3f}, AIC: {d['aic']:.3f}, R2: {d['r2']:.3f}")
            auc_sum = auc_sum + d['auc']
            f1_sum = f1_sum + d['f1']
            recall_sum = recall_sum + d['recall']
            aic_sum = aic_sum + d['aic']
            r2_sum = r2_sum + d['r2']
        # Average the metrics over the n runs at this weight.
        different_weights_metrics[str_cls_weight] = f'ave-AUC: {auc_sum/n:.3f}, ave-f1: {f1_sum/n:.3f}, ave-recall: {recall_sum/n:.3f}, ave AIC: {aic_sum/n:.3f}, ave R2: {r2_sum/n:.3f}'
    print(f'----------Average results summary------------：')
    pprint(different_weights_metrics, width=200, sort_dicts=False)

###### ----- Run the given configuration n times and average the metrics -----
@timer
def run_many_times(n=10, nb_max_epochs=500, cls_weight=None,
                   node_num=20, nb_mid_layers=4,
                   save_test_pred_results=False,
                   save_all_pred_results=False,
                   save_fprTpr_to_npy=False,
                   save_trained_model=False,
                   use_early_stop=False,
                   flatten_neighbours=False):
    """Train the LSTM ``n`` times with one configuration and average metrics.

    Optionally persists, per loop: the trained model (.h5), the test-set
    ROC fpr/tpr arrays (.npy), and predicted probabilities for the test
    set and/or the whole dataset (.csv).

    Args:
        n: number of repeated runs.
        nb_max_epochs: max training epochs for each run.
        cls_weight: class-weight dict; defaults to {0: 1, 1: 1}.
            (``None`` sentinel avoids a shared mutable default argument.)
        node_num: LSTM node count passed to run_once_lstm.
        nb_mid_layers: hidden-layer count passed to run_once_lstm.
        save_test_pred_results: save test-set predictions to CSV.
        save_all_pred_results: save whole-dataset predictions to CSV.
        save_fprTpr_to_npy: save ROC fpr/tpr arrays for later plotting.
        save_trained_model: save each trained model to GS.SAVED_MODEL_DIR.
        use_early_stop: forwarded to run_once_lstm.
        flatten_neighbours: forwarded to get_proc_nn_data.

    Returns:
        str: one-line summary of the averaged metrics.
    """
    if cls_weight is None:
        cls_weight = {0: 1, 1: 1}
    results_lst = []
    f1_sum = 0.0
    auc_sum = 0.0
    recall_sum = 0.0
    aic_sum = 0.0
    r2_sum = 0.0

    # Tag for output filenames.
    # NOTE(review): assumes cls_weight[0] == 1 -- the label ignores the
    # negative-class weight.
    str_weights = f'weights_1to{cls_weight[1]}'
    (x_all, y_onehot, geoId_r), (x_train, y_train_onehot, geoId_train_r), (x_val, y_val_onehot, geoId_val_r), \
    (x_test, y_test_onehot, geoId_test_r), y_test_r, channels, nb_features = get_proc_nn_data(model_type='lstm',
                                                                                              drop_column=None,
                                                                                              whether_return_GeoId=True,
                                                                                              flatten_neighbours=flatten_neighbours)
    for i in range(n):
        print(f'-------------------Current loop {i+1}---------------------------:')
        d, model, hist = run_once_lstm(x_train=x_train, y_train_onehot=y_train_onehot,
                                                      x_test=x_test, y_test_onehot=y_test_onehot,
                                                      y_test_not_onehot=y_test_r,
                                                      channels=channels,
                                                      nb_epochs=nb_max_epochs,
                                                      nb_mid_layers=nb_mid_layers,
                                                      cls_weight=cls_weight,
                                                      nb_pca_or_nb_features=nb_features,
                                                      verbose=0,
                                                      node_num=node_num,
                                                      lr_rate=0.001,
                                                      bat_size=10000,
                                                      val_split=0.2,
                                                      val_data=(x_val, y_val_onehot),
                                                      use_early_stop=use_early_stop)
        # Save the trained model for this loop.
        if save_trained_model:
            saved_model_dir = GS.SAVED_MODEL_DIR
            model_save_filepath = f'{saved_model_dir}/model_{str_weights}_lstm_loop{i + 1}.h5'
            model.save(model_save_filepath)
        # Save fpr/tpr so the ROC curve can be plotted later.
        if save_fprTpr_to_npy:
            fprtpr_saved_dir = GS.FPR_TPR_DIR
            _, y_pred_proba = predict_class_prob(model, x_test, bat_size=10000, model_type='keras_dnn_lstm')
            fpr, tpr, thresholds = metrics.roc_curve(y_test_r, y_pred_proba)
            cur_arr = np.array((fpr, tpr))
            np.save(f'{fprtpr_saved_dir}/fprTpr_{str_weights}_lstm_test_loop{i + 1}.npy', cur_arr)
        predicted_csv_dir = GS.PREDICTED_Y_DIR
        # Predict and save to file [test dataset].
        if save_test_pred_results:
            save_predY_toCSV(x_test, model, f'{predicted_csv_dir}/results_{str_weights}_lstm_test_loop{i + 1}.csv',
                             geoId_test_r, bat_size=10000)
        # Predict and save to file [whole dataset].
        if save_all_pred_results:
            save_predY_toCSV(x_all, model, f'{predicted_csv_dir}/results_{str_weights}_lstm_all_loop{i + 1}.csv',
                             geoId_r, bat_size=10000)

        performance_str = (f"loop: {i+1}, nb_mid_layers: {nb_mid_layers}, AUC: {d['auc']:.3f}, F1-score: {d['f1']:.3f}, Recall: {d['recall']:.3f}"
                           f", AIC: {d['aic']:.3f}, R2: {d['r2']:.3f}")
        results_lst.append(performance_str)

        auc_sum = auc_sum + d['auc']
        f1_sum = f1_sum + d['f1']
        recall_sum = recall_sum + d['recall']
        aic_sum = aic_sum + d['aic']
        r2_sum = r2_sum + d['r2']
    pprint(results_lst, sort_dicts=False, width=200)
    print(f'-----run_many_times-->>Average results of {n} times:-----------------')
    ave_result_str = f'ave AUC: {auc_sum/n:.3f}, ave f1: {f1_sum/n:.3f}, ave recall: {recall_sum/n:.3f}, ave AIC: {aic_sum/n:.3f}, ave R2: {r2_sum/n:.3f}'
    print(ave_result_str)
    return ave_result_str

if __name__ == '__main__':
    # Ensure the result-output folders from settings exist; create them if missing.
    general_utils.check_data_settings_folder()

    # grid_search_node_and_layers(n=5, nb_max_epochs=200, cls_weight={0: 1, 1: 1},
    #                             node_nums_to_try=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 128, 256, 364, 512],
    #                             nb_mid_layers_to_try=[2, 3, 4, 5, 6, 7, 8])

    # Model accuracy under different class weights.
    # run_many_times_class_weights(n=10, nb_max_epochs=500, max_weight = 22, node_num=256, nb_mid_layers=2)

    # print('----------------------开始权重比值1:1-------------------')
    run_many_times(n=1, nb_max_epochs=2,
                   cls_weight={0: 1, 1: 1},
                   node_num=256, nb_mid_layers=2,
                   save_test_pred_results=False,
                   save_all_pred_results=False,
                   save_fprTpr_to_npy=False,
                   save_trained_model=False,
                   use_early_stop=False,
                   flatten_neighbours=False)     # last argument: do not flatten neighbours

    # print('----------------------开始权重比值1:4-------------------')
    # run_many_times(n=10, nb_max_epochs=200,
    #                cls_weight={0: 1, 1: 4},
    #                save_test_pred_results=False,
    #                save_all_pred_results=False,
    #                save_fprTpr_to_npy=False,
    #                save_trained_model=False,
    #                flatten_neighbours=False)

    # NOTE(review): run_many_times_remove_some_factors is not defined in this
    # file -- run_many_times_drop_columns appears to be the current name; verify
    # before re-enabling this call.
    # run_many_times_remove_some_factors(n=10, nb_max_epochs=500,
    #                                    cls_weight={0: 1, 1: 4})

