# -*- coding:utf-8 -*-
import datetime
import pandas as pd
from imblearn.pipeline import make_pipeline
from lightgbm import LGBMClassifier
from utils.landslide_utils import load_df_fromfile
from gwlsa_settings import net_params
import gwlsa_settings as GS
from utils import general_utils


def lgbm_with_neighbours(max_class_weight=2, save_results=False):
    """Train class-weighted LightGBM classifiers on flattened-neighbour features.

    Loads train/val/test splits, flattens each sample's ``neighbours`` column
    into a feature matrix, then fits one ``LGBMClassifier`` per positive-class
    weight in ``1..max_class_weight`` (class 0 is always weighted 1) and prints
    test-set performance for each. Optionally re-predicts over the full dataset
    (train + val + test) and writes the predictions to a timestamped CSV.

    Args:
        max_class_weight (int): Largest positive-class weight to evaluate;
            every integer weight in [1, max_class_weight] is tried.
        save_results (bool): When True, save full-dataset predictions under
            ``GS.PREDICTED_Y_DIR``, one CSV file per weight setting.
    """
    # Ensure the configured output folders exist, creating them if needed.
    general_utils.check_data_settings_folder()
    train_df, val_df, test_df = load_df_fromfile(net_params['data_load_dir'], net_params['max_distance'],
                                                 net_params['resolution'],
                                                 only_read_test=False,
                                                 file_format=net_params['data_load_format'])

    x_columns = net_params['x_column_names']
    y_column = net_params['y_column_name']
    id_column = net_params['id_column']

    # Train and validation rows are fitted together; no model selection happens here.
    train_val_df = pd.concat([train_df, val_df], ignore_index=False)

    id_train, X_train, y_train = general_utils.flatten_neighbours_parallel(
        train_val_df, x_columns, y_column, id_column, neighbours_column='neighbours')
    id_test, X_test, y_test = general_utils.flatten_neighbours_parallel(
        test_df, x_columns, y_column, id_column, neighbours_column='neighbours')
    # Full-dataset features/ids, only needed when saving predictions for every sample.
    X_all_flatten = pd.concat([X_train, X_test], ignore_index=True, axis=0)
    id_all_flatten = pd.concat([id_train, id_test], ignore_index=True, axis=0)
    print('展开neighbours完毕！！！\n')
    print('*'*60)

    ###############################################################################
    # Classification using a LightGBM classifier with increasing positive-class weight
    ###############################################################################
    print('Weighted LightGBM classifier performance:')
    for i in range(1, max_class_weight + 1):
        wlgb = LGBMClassifier(random_state=0, n_jobs=-1, class_weight={0: 1, 1: i}, verbose=-1)
        pipeline_wlgb = make_pipeline(wlgb)
        pipeline_wlgb.fit(X_train, y_train)
        y_pred_wlgb = pipeline_wlgb.predict(X_test)
        y_pred_wlgb_prob = pipeline_wlgb.predict_proba(X_test)[:, 1]
        general_utils.print_performance(y_test, y_pred_wlgb, y_pred_wlgb_prob,
                                        prefix_info=f'[class_weight 1: {i}]')

        if save_results:
            # Re-predict over the whole dataset (train + val + test) and persist
            # the results to a CSV named after the weight ratio and a timestamp.
            y_pred_all = pipeline_wlgb.predict(X_all_flatten)
            y_pred_all_prob = pipeline_wlgb.predict_proba(X_all_flatten)[:, 1]
            weights_str = f'w_1to{i}'
            t_name = datetime.datetime.today().strftime("%Y%m%d%H%M%S")
            save_csv_filename = f'{GS.PREDICTED_Y_DIR}/predLgb4N_{weights_str}_{t_name}.csv'
            general_utils.save_results(id_all_flatten.values, y_pred_all, y_pred_all_prob,
                                       save_csv_filename, print_info=False)

    print('Done!')

if __name__ == "__main__":
    # Default run: single class-weight ratio (1:1), no result files written.
    lgbm_with_neighbours(save_results=False, max_class_weight=1)