from sklearn import metrics
import matplotlib.pyplot as plt
from scipy.special import softmax
from data.common_data_process import numpy_to_tensor, numpy_to_tensor_data, normalizeData
from torch.utils.data import TensorDataset, DataLoader
import torch.nn as nn
import torch
from config import cfg

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_curve, auc

from util import record_result


class Model():
    """Thin training/evaluation wrapper around a configurable torch classifier.

    The concrete network is selected by ``cfg.net`` at construction time and
    moved to the GPU. Training uses BCE loss with Adam at ``cfg.learning_rate``.
    """

    def __init__(self):
        # NOTE(review): hard-codes .cuda(); will fail on a CPU-only host — confirm intended.
        self.model = self.create_model().cuda()
        self.criterion = nn.BCELoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=cfg.learning_rate)

    def create_model(self):
        """Instantiate and return the network named by ``cfg.net``.

        Raises:
            ValueError: if ``cfg.net`` does not name a known architecture
                (previously this returned None and __init__ crashed with an
                opaque AttributeError on ``.cuda()``).
        """
        if cfg.net == "TFClassifierForwardEmbedding":
            from model.net.TFClassifierForwardEmbedding import TransformerClassifier
            return TransformerClassifier(cfg.word_dict_size, cfg.data_row_size,
                                         cfg.d_model, cfg.num_layers,
                                         cfg.num_heads, cfg.dropout_rate)
        elif cfg.net == "TFClassifierForwardLinear":
            from model.net.TFClassifierForwardLinear import TransformerClassifier
            return TransformerClassifier(cfg.data_row_size,
                                         cfg.d_model, cfg.num_layers,
                                         cfg.num_heads, cfg.dropout_rate)
        elif cfg.net == "TFComp":
            from model.net.TFComp import TransformerClassifier
            return TransformerClassifier(cfg.d_model, cfg.num_layers,
                                         cfg.num_heads, cfg.dropout_rate)
        elif cfg.net == "DNNWithBN":
            from model.net.DNNWithBN import DeepNeuralNetworkModel
            return DeepNeuralNetworkModel(cfg.data_row_size, cfg.output_dim)
        elif cfg.net == "DNNWithLN":
            from model.net.DNNWithLN import DeepNeuralNetworkModel
            return DeepNeuralNetworkModel(cfg.data_row_size, cfg.output_dim)
        elif cfg.net == "LSTM":
            from model.net.LSTM import LSTM
            return LSTM(cfg.data_row_size, cfg.output_dim)
        raise ValueError("unknown cfg.net: {!r}".format(cfg.net))

    def train_model(self, x_train, y_train, batch_size, num_epochs, user_id, save_model_path ='', save_model = False):
        """Train the model on (x_train, y_train) for ``num_epochs`` epochs.

        Args:
            x_train, y_train: numpy arrays; converted to tensors via
                ``numpy_to_tensor``.
            batch_size: mini-batch size for the shuffled DataLoader.
            num_epochs: number of full passes over the data.
            user_id: identifier embedded in saved checkpoint filenames.
            save_model_path: directory/prefix for checkpoints.
            save_model: when True, save the whole model after every epoch.
        """
        x_train, y_train = numpy_to_tensor(x_train, y_train)
        dataloader = DataLoader(TensorDataset(x_train, y_train),
                                batch_size=batch_size, shuffle=True)

        for epoch in range(num_epochs):
            record_result("start training for epoch:" + str(epoch))
            self.model.train()
            for batch_x, batch_y in dataloader:
                self.optimizer.zero_grad()
                loss = self.criterion(self.model(batch_x), batch_y)
                loss.backward()
                self.optimizer.step()

            # Cheap progress probe: report metrics on one (shuffled) batch only.
            probe_x, probe_y = next(iter(dataloader))
            self.evaluate_model(probe_x, probe_y.cpu().numpy())

            if save_model:
                torch.save(self.model, save_model_path + 'model_user_' + str(user_id) + "_ep_" + str(epoch) + '.pth')

    def evaluate_model(self, x_test, y_test, is_show_roc = False):
        """Run inference on ``x_test`` and report metrics against ``y_test``.

        Args:
            x_test: numpy array or tensor of inputs.
            y_test: numpy array of binary ground-truth labels.
            is_show_roc: when True, also plot the ROC curve.

        Returns:
            ``((acc, recall, precision, F1, fp_rate), acc > acc_target)``
            as produced by :meth:`calculateMetrics`.
        """
        self.model.eval()
        with torch.no_grad():
            if not isinstance(x_test, torch.Tensor):
                x_test = numpy_to_tensor_data(x_test)
            prediction = self.model(x_test).cpu().numpy()
        return self.calculateMetrics(y_test, prediction, is_show_roc)

    def calculateMetrics(self, target, prediction,  is_show_roc, acc_target = 0.99):
        """Compute and log classification metrics for probability predictions.

        Args:
            target: binary ground-truth labels.
            prediction: predicted probabilities; thresholded at 0.5.
            is_show_roc: when True, plot the ROC curve via matplotlib.
            acc_target: accuracy threshold for the returned boolean flag.

        Returns:
            ``((acc, recall, precision, F1, fp_rate), acc > acc_target)``
        """
        # Hoist the thresholding: the original recomputed `prediction >= 0.5`
        # for every single metric.
        pred_labels = prediction >= 0.5

        acc = accuracy_score(target, pred_labels)
        precision = precision_score(target, pred_labels)
        recall = recall_score(target, pred_labels)
        fp = self.fp_rate(target, pred_labels)
        F1 = f1_score(target, pred_labels)

        record_result("acc: {}".format(acc))
        record_result("precision: {}".format(precision))
        record_result("recall: {}".format(recall))
        record_result("fp: {}".format(fp))
        record_result("F1 : {}".format(F1))
        record_result("============================================")

        if is_show_roc:
            # ROC uses the raw probabilities, not the thresholded labels.
            fpr, tpr, a = roc_curve(target, prediction)
            roc_auc = auc(fpr, tpr)
            plt.figure()
            plt.plot(fpr, tpr, color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
            plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
            plt.xlim([0.0, 1.0])
            plt.ylim([0.0, 1.05])
            plt.xlabel('False Positive Rate')
            plt.ylabel('True Positive Rate')
            plt.title('Receiver operating characteristic')
            plt.legend(loc="lower right")
            plt.show()

        return (acc, recall, precision, F1, fp), acc > acc_target

    def fp_rate(self, target, prediction):
        """Return the false-positive rate FP / (TN + FP).

        Args:
            target: 1-D array of binary ground-truth labels.
            prediction: 1-D array of binary predicted labels.

        The 1e-6 in the denominator avoids division by zero when there are
        no negative samples at all.
        """
        true_positives, true_negatives = 0, 0
        false_positives, false_negatives = 0, 0

        for i in range(prediction.shape[0]):
            y = target[i]
            yHat = prediction[i]
            if y == yHat:
                if y == 1:
                    true_positives += 1
                else:
                    true_negatives += 1
            else:
                if y == 1:
                    false_negatives += 1
                else:
                    false_positives += 1
        return  false_positives / (true_negatives + false_positives+ 1e-6)

    def mode_test(self, xTest, yTest ) :
        """Smoke-test the model on a single 11-feature sample and print the result."""
        xTest = normalizeData(xTest)
        xTest = xTest.reshape((1, 11))
        # NOTE(review): nn.Module has no .predict(); this looks like a Keras
        # leftover — confirm this method is still in use before relying on it.
        prediction = self.model.predict(xTest)
        # Fixed: softmax must run over the class axis. The original axis=0
        # normalized over the batch of size 1, making both class scores 1.0
        # and the comparison below always False.
        results = softmax(prediction, axis=-1)
        print("“1”表示有效用户， “0”表示无效用户")
        print("测试数据量："+str(len(results)))

        print("测试数据结果：")
        y = yTest[0]
        yHat = int(results[0][1] > results[0][0])
        # Fixed: the ground truth (y) belongs with "样本" and the prediction
        # (yHat) with "测试结果" — the two were swapped in the original print.
        print("样本："+str(y)+",测试结果："+str(yHat))

    @staticmethod
    def plot_metrics(history):
        """Plot train/validation curves for loss, auc and accuracy from a
        Keras-style ``history`` object.

        Declared @staticmethod: the original had no ``self``, so calling it on
        an instance bound ``history`` to the instance and broke.
        """
        # Renamed from `metrics` to avoid shadowing the imported sklearn module.
        metric_names = ['loss', 'auc', 'accuracy']
        for n, metric in enumerate(metric_names):
            name = metric
            plt.plot(history.epoch, history.history[metric], label='Train')
            plt.plot(history.epoch, history.history['val_' + metric], linestyle="--", label='Val')
            plt.xlabel('Epoch', fontsize=13)
            plt.ylabel(name, fontsize=13)
            if metric == 'loss':
                plt.ylim(bottom=0)
            else:
                # auc and accuracy both live in [0, 1].
                plt.ylim([0, 1])
            plt.legend(loc='best')
            plt.show()

    @staticmethod
    def plot_roc(name, labels, predictions, **kwargs):
        """Plot an ROC curve (in percent) for ``predictions`` against ``labels``.

        Declared @staticmethod: the original had no ``self``, so calling it on
        an instance bound ``name`` to the instance and broke.
        """
        fp, tp, _ = metrics.roc_curve(labels, predictions)
        plt.plot(100 * fp, 100 * tp, label=name, linewidth=2, **kwargs)
        plt.xlabel('False positives [%]')
        plt.ylabel('True positives [%]')
        plt.xlim([0, 100])
        plt.ylim([0, 100.5])
        plt.grid(True)
        ax = plt.gca()
        ax.set_aspect('equal')
