#!/usr/bin/env python
# coding: utf-8
import os
# Force Keras to use the PyTorch backend (must be set before keras is imported)
os.environ["KERAS_BACKEND"] = "torch"

import torch
import keras
from keras import layers, optimizers, metrics, callbacks
from tqdm.keras import TqdmCallback
from utils.general_utils import prepare_data, evaluate_metrics_plotROC
from utils.landslide_utils import predict_class_prob, F1Score

# Early-stopping callback shared by fit().
# NOTE: F1 is a higher-is-better metric. With mode='auto', Keras cannot infer
# the direction of a custom monitor named 'val_f1' and falls back to 'min',
# which would treat a *dropping* F1 as improvement — so the mode must be 'max'.
early_stop = callbacks.EarlyStopping(monitor='val_f1', patience=15, verbose=2, mode='max')


def net_dnn(input_shape=(18, 1),
            nb_mid_layers=5,
            nb_class=2,
            node_num=64,
            lr_rate=0.001,
            loss_fun='categorical_crossentropy',
            drop_out=True,
            drop_percentage=0.5):
    """Build and compile a fully-connected DNN classifier.

    Parameters
    ----------
    input_shape : tuple
        Shape of a single sample, e.g. (n_features, channels).
    nb_mid_layers : int
        Total number of hidden Dense layers before Flatten (the first is
        added outside the loop, so the loop adds ``nb_mid_layers - 1`` more).
    nb_class : int
        Number of output classes (softmax units).
    node_num : int
        Units per hidden Dense layer.
    lr_rate : float
        Learning rate for the Nadam optimizer.
    loss_fun : str or callable
        Loss function passed to ``model.compile``.
    drop_out : bool
        Whether to insert Dropout layers between hidden layers.
    drop_percentage : float
        Dropout rate used by every Dropout layer.

    Returns
    -------
    keras.Sequential
        A compiled model with AUC, F1 and Recall metrics.
    """
    print("Keras backend:", keras.config.backend())
    print("PyTorch CUDA available:", torch.cuda.is_available())
    if torch.cuda.is_available():
        print("PyTorch current device:", torch.cuda.current_device())
        print("PyTorch device name:", torch.cuda.get_device_name(0))

    model = keras.Sequential()

    # Standalone input layer; subsequent Dense layers infer their input shape.
    model.add(layers.InputLayer(shape=input_shape))

    # First hidden layer.
    model.add(layers.Dense(node_num, activation='relu'))

    # Remaining hidden layers, each optionally followed by Dropout.
    for _ in range(nb_mid_layers - 1):
        model.add(layers.Dense(node_num, activation='relu'))
        if drop_out:
            model.add(layers.Dropout(drop_percentage))

    # Collapse the (features, units) activations to a flat vector for the head.
    model.add(layers.Flatten())

    if drop_out:
        model.add(layers.Dropout(drop_percentage))

    # Final fully-connected layer before the output.
    model.add(layers.Dense(node_num, activation='relu'))

    # Softmax output over the nb_class classes.
    model.add(layers.Dense(nb_class, activation='softmax'))

    model.compile(
        optimizers.Nadam(learning_rate=lr_rate),
        loss=loss_fun,
        metrics=[metrics.AUC(), F1Score(), metrics.Recall()]
    )

    return model


def fit(x_train, y_train, nb_epochs=100, verbose=0,
        input_shape=(18, 1), nb_mid_layers=5,
        node_num=64,
        lr_rate=0.0001,
        loss_fun='categorical_crossentropy',
        class_weight=None,
        bat_size=1000,
        val_split=0.2,
        val_data=None,
        use_early_stop=True):
    """Build a DNN via :func:`net_dnn` and fit it on the training data.

    Returns
    -------
    tuple
        ``(model, history)`` — the trained Keras model and its History.
    """
    dnn = net_dnn(input_shape=input_shape,
                  nb_mid_layers=nb_mid_layers,
                  nb_class=2,
                  node_num=node_num,
                  lr_rate=lr_rate,
                  loss_fun=loss_fun)

    # Progress-bar callback, plus early stopping when requested.
    cbs = [TqdmCallback(verbose=verbose)]
    if use_early_stop:
        cbs.append(early_stop)

    hist = dnn.fit(x_train, y_train,
                   validation_split=val_split,
                   validation_data=val_data,
                   shuffle=True,
                   batch_size=bat_size,
                   epochs=nb_epochs,
                   verbose=verbose,
                   class_weight=class_weight,
                   callbacks=cbs)

    return dnn, hist


def run_once_dnn(x_train,
                 y_train_onehot,
                 x_test,
                 y_test_onehot,
                 y_test_not_onehot,
                 channels,
                 nb_epochs,
                 nb_mid_layers,
                 cls_weight,
                 nb_pca_or_nb_features,
                 return_pred_y=False,
                 verbose=0,
                 node_num=64,
                 lr_rate=0.0001,
                 bat_size=1000,
                 val_split=0.2,
                 val_data=None,
                 show_roc=False,
                 use_early_stop=True):
    '''
    Run one fit-and-predict cycle: train a DNN, predict on the test set,
    and evaluate the predictions.

    Parameters
    ----------
    x_train : training features, shaped for input (nb_pca_or_nb_features, channels)
    y_train_onehot : one-hot encoded training labels
    x_test : test features
    y_test_onehot : one-hot encoded test labels
        NOTE(review): this argument is never used in the body — kept only
        for signature compatibility with callers.
    y_test_not_onehot : test labels as class indices (used for evaluation)
    channels : number of input channels per feature
    nb_epochs : number of training epochs
    nb_mid_layers : number of hidden layers in the DNN
    cls_weight : class-weight dict forwarded to model.fit
    nb_pca_or_nb_features : number of PCA components or raw features
    return_pred_y : if True, return predictions instead of the metrics dict

    Returns
    -------
    ``([y_pred_class, y_pred_proba], model, history)`` when return_pred_y,
    else ``(metrics_dict, model, history)``.
    NOTE(review): when return_pred_y is True the computed metrics_dict is
    discarded (the ROC plot / AIC evaluation still runs) — confirm intended.
    '''
    # Step 1: fit the model on the training data.
    # val_split is deliberately None here; validation comes from val_data if given.
    model, history = fit(x_train,
                         y_train_onehot,
                         nb_epochs=nb_epochs,
                         verbose=verbose,
                         input_shape=(nb_pca_or_nb_features, channels),
                         nb_mid_layers=nb_mid_layers,
                         class_weight=cls_weight,
                         node_num=node_num,
                         lr_rate=lr_rate,
                         bat_size=bat_size,
                         val_split=None,
                         val_data=val_data,
                         use_early_stop=use_early_stop
                         )
    # Step 2: predict classes and probabilities on the test set.
    title = f'dnn train {nb_epochs} epochs'
    # print(x_test_pca.shape, x_test_pca.dtype)
    y_pred_class, y_pred_proba = predict_class_prob(model, x_test, bat_size)
    # Step 3: compute accuracy metrics; n and k parameterize the AIC computation
    # (n = number of test samples, k = number of features — presumably the AIC
    # model-complexity term; verify against evaluate_metrics_plotROC).
    n = len(y_test_not_onehot)
    k = x_test.shape[1]
    return_AIC_params = {
        'return_AIC': True,
        'n': n,
        'k': k
    }
    metrics_dict = evaluate_metrics_plotROC(y_test_not_onehot, y_pred_class, y_pred_proba,
                                            title=title,
                                            show_roc=show_roc,
                                            return_AIC_params=return_AIC_params)
    # --------------------------------------------------------------------------------------------------
    if return_pred_y:
        return [y_pred_class, y_pred_proba], model, history
    else:
        return metrics_dict, model, history

def run_forpca_manytimes(start_pca_num, end_pca_num, x_train, x_test,
                         y_train, y_test,
                         channels, nb_epochs, nb_mid_layers, cls_weight):
    '''
    Fit and evaluate the DNN once for each PCA component count in
    [start_pca_num, end_pca_num].

    Parameters
    ----------
    start_pca_num : first number of PCA components to try (inclusive)
    end_pca_num : last number of PCA components to try (inclusive)
    x_train, x_test : raw feature matrices
    y_train : training labels (one-hot encoding is produced by prepare_data)
    y_test : test labels as class indices — used for metric evaluation
    channels : number of input channels per feature
    nb_epochs : training epochs per run
    nb_mid_layers : hidden layers in the DNN
    cls_weight : class-weight dict forwarded to model.fit

    Returns
    -------
    dict
        Maps each PCA component count to its metrics dict from run_once_dnn.
    '''
    sum_results = {}
    for nb_pca_components in range(start_pca_num, end_pca_num + 1):
        # PCA-transform the data for this component count.
        # (assumes prepare_data returns a 3-tuple when return_transformer=False — TODO confirm)
        x_train_new, y_train_onehot, x_test_new = prepare_data(
            x_train, x_test, y_train,
            nb_pca_or_nb_features=nb_pca_components,
            channels=channels,
            do_pca=True,
            x_reshape_format=None,
            return_transformer=False)
        # BUG FIX: the original call passed do_pca=True, which run_once_dnn does
        # not accept (TypeError), and supplied the arguments positionally with
        # only one y_test — shifting channels into y_test_not_onehot and leaving
        # nb_pca_or_nb_features unfilled. Keyword arguments make the mapping
        # explicit. y_test_onehot is unused inside run_once_dnn, so passing the
        # index-encoded y_test there is harmless.
        sum_results[nb_pca_components], model, hist = run_once_dnn(
            x_train_new, y_train_onehot, x_test_new,
            y_test_onehot=y_test,
            y_test_not_onehot=y_test,
            channels=channels,
            nb_epochs=nb_epochs,
            nb_mid_layers=nb_mid_layers,
            cls_weight=cls_weight,
            nb_pca_or_nb_features=nb_pca_components)

    return sum_results