from functools import cache
from re import sub
from matplotlib import pyplot as plt
import test
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np  
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.cluster import KMeans, MiniBatchKMeans

from exp.exp_anomaly_detection import Exp_Anomaly_Detection
from utils.tools import EarlyStopping, adjust_learning_rate, adjustment
from ext.exp.utils import report
from ext.utils.pot import pot_eval


# POT threshold parameters: (lm, scaling factor) pairs consumed by pot_eval.
# lm_d = {
#     # 'SMD': [(0.99995, 1.04), (0.99995, 1.06)],
#     'SMD': [(0.99995, 1.04), (0.995, 1.06)],
#     'synthetic': [(0.999, 1), (0.999, 1)],
#     'SWaT': [(0.993, 1), (0.993, 1)],
#     'UCR': [(0.993, 1), (0.99935, 1)],
#     'NAB': [(0.991, 1), (0.99, 1)],
#     'SMAP': [(0.98, 1), (0.98, 1)],
#     'MSL': [(0.97, 1), (0.999, 1.04)],
#     'WADI': [(0.99, 1), (0.999, 1)],
#     'MSDS': [(0.91, 1), (0.9, 1.04)],
#     'MBA': [(0.87, 1), (0.93, 1.04)],
# }
# lm = lm_d[args.dataset][1 if 'TranAD' or 'DTTAD' in args.model else 0]
# (note: the condition above was buggy — "'TranAD' or 'DTTAD' in args.model"
# always evaluates to truthy; it would need "'TranAD' in args.model or ...")
# (lm, factor) pairs for pot_eval, keyed first by dataset name, then by model.
lm_d = {
    'SMD': dict(
        TimesNet=(0.996, 1.06),
        DTAAD=(0.998, 1.06),
        DTAAD_mod=(0.998, 1.06),
        # earlier tuning attempt: DTAAD=(0.9945, 1.11)
    ),
}

class Exp_Anomaly_Detection_v3(Exp_Anomaly_Detection):
    """Anomaly-detection experiment that picks the detection threshold with
    POT (peaks-over-threshold, via ``pot_eval``) instead of a fixed quantile.

    Differences from the parent ``Exp_Anomaly_Detection``:
      * extra ``args.features`` modes when scoring — ``'X'`` keeps only the
        last time step of each window, ``'XB'`` truncates the input window
        to the model's output length;
      * train/test reconstruction energies are cached on disk so repeated
        evaluations skip the forward passes;
      * per-dataset/per-model POT parameters are looked up in ``lm_d``.
    """

    def __init__(self, args):
        super().__init__(args)
        # (lm, factor) pair for pot_eval; fail early with a clear message
        # when the dataset/model combination has no tuned entry.
        try:
            self.lm = lm_d[args.data][args.model]
        except KeyError as err:
            raise KeyError(
                f"lm_d has no POT parameters for data={args.data!r}, "
                f"model={args.model!r}") from err

    def vali(self, vali_data, vali_loader, criterion):
        """Return the mean reconstruction loss over ``vali_loader``.

        Args:
            vali_data: unused; kept for interface compatibility.
            vali_loader: iterable of ``(batch_x, label)`` pairs.
            criterion: reconstruction loss, e.g. ``nn.MSELoss()``.

        Returns:
            float: average loss over all validation batches.
        """
        total_loss = []
        self.model.eval()
        with torch.no_grad():
            for batch_x, _ in vali_loader:
                batch_x = batch_x.float().to(self.device)

                # NOTE(review): the model is unpacked here as
                # (reconstruction, aux), while _process_data uses the raw
                # return value directly — confirm which forward signature
                # the deployed models actually have.
                outputs, _ = self.model(batch_x, None, None, None)

                if self.args.features == 'MS':
                    outputs = outputs[:, :, -1:]      # last channel only
                elif self.args.features == 'X':
                    outputs = outputs[:, -1:, :]      # last time step only
                    batch_x = batch_x[:, -1:, :]
                # default ('M', ...): compare the full reconstruction

                pred = outputs.detach().cpu()
                true = batch_x.detach().cpu()
                total_loss.append(criterion(pred, true).item())
        self.model.train()
        return np.average(total_loss)

    def _score_loader(self, loader, keep_labels):
        """Per-time-step reconstruction-energy scores over *loader*.

        Shared by the train- and test-set passes of ``_process_data``.

        Args:
            loader: iterable of ``(batch_x, batch_y)`` pairs.
            keep_labels: when True, also collect (and, under the 'X'/'XB'
                feature modes, align) the per-step labels.

        Returns:
            tuple: ``(energy, labels)`` — flat numpy array of per-step mean
            squared errors, and a flat label array (empty list when
            ``keep_labels`` is False).
        """
        energies, labels = [], []
        # no_grad: pure scoring, no autograd graph needed
        with torch.no_grad():
            for batch_x, batch_y in loader:
                batch_x = batch_x.float().to(self.device)
                # reconstruction (single return value; see NOTE in vali)
                outputs = self.model(batch_x, None, None, None)
                if self.args.features == 'X':
                    # keep only the last time step of each window
                    outputs = outputs[:, -1:, :]
                    batch_x = batch_x[:, -1:, :]
                    if keep_labels:
                        batch_y = batch_y[:, -1:]
                elif self.args.features == 'XB':
                    # align the input window to the model's output length
                    T = outputs.shape[1]
                    batch_x = batch_x[:, -T:, :]
                    if keep_labels:
                        batch_y = batch_y[:, -T:]
                score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)
                energies.append(score.detach().cpu().numpy())
                if keep_labels:
                    labels.append(batch_y)
        energy = np.concatenate(energies, axis=0).reshape(-1)
        if keep_labels:
            labels = np.concatenate(labels, axis=0).reshape(-1)
        return energy, labels

    def _process_data(self, setting, test=True):
        """Compute reconstruction energies on the train and test splits.

        Args:
            setting: experiment tag; locates ``./checkpoints/<setting>/``.
            test: when truthy, reload the best checkpoint from disk first.
                (The previous code tested the imported ``test`` *module*,
                which is always truthy, so this default preserves the old
                always-load behaviour for direct callers.)

        Returns:
            tuple: ``(train_energy, test_energy, test_labels)`` as flat
            numpy arrays.
        """
        test_data, test_loader = self._get_data(flag='test')
        train_data, train_loader = self._get_data(flag='train')
        if test:
            print('loading model')
            self.model.load_state_dict(torch.load(
                os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        self.model.eval()
        # reduction='none' keeps per-element errors; the old reduce=False
        # spelling is deprecated and only emits warnings.
        self.anomaly_criterion = nn.MSELoss(reduction='none')

        # (1) statistics on the train set (labels not needed)
        train_energy, _ = self._score_loader(train_loader, keep_labels=False)

        # (2) statistics on the test set (labels kept for evaluation)
        test_energy, test_labels = self._score_loader(test_loader, keep_labels=True)
        return train_energy, test_energy, test_labels

    def test(self, setting, test=0):
        """Evaluate anomaly detection for *setting* and log the metrics.

        Energies are cached under ``./test_results/<setting>/cached.npy``;
        delete that file to force re-scoring. POT selects the threshold,
        then accuracy/precision/recall/F1/AUC are printed and appended to
        ``result_anomaly_detection.txt``.

        Args:
            setting: experiment tag.
            test: forwarded to ``_process_data``; non-zero reloads the
                saved checkpoint before scoring. (Previously the flag was
                accidentally ignored — the module ``test`` shadowed it —
                so the checkpoint was always loaded.)
        """
        folder_path = './test_results/' + setting + '/'
        os.makedirs(folder_path, exist_ok=True)

        cache_path = os.path.join(folder_path, 'cached.npy')
        if not os.path.exists(cache_path):
            train_energy, test_energy, test_labels = self._process_data(setting, test)
            np.save(cache_path, {
                'train_energy': train_energy,
                'test_energy': test_energy,
                'test_labels': test_labels,
            })
        else:
            cache_data = np.load(cache_path, allow_pickle=True).item()
            train_energy = cache_data['train_energy']
            test_energy = cache_data['test_energy']
            test_labels = cache_data['test_labels']

        # POT threshold selection on the energy scores
        pred = np.array(pot_eval(train_energy, test_energy, test_labels,
                                 lm=self.lm, q=1e-5))
        gt = np.array(test_labels)
        print("pred: ", pred.shape)
        print("gt:   ", gt.shape)

        # (5) evaluation; the energy's sign convention is not guaranteed,
        # so report the better AUC orientation
        auc_score = roc_auc_score(gt, test_energy)
        auc_score = max(auc_score, 1 - auc_score)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = precision_recall_fscore_support(
            gt, pred, average='binary')
        summary = ("Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, "
                   "F-score : {:0.4f}, AUC: {:0.4f} ").format(
                       accuracy, precision, recall, f_score, auc_score)
        print(summary)

        # context manager guarantees the shared results log is closed
        with open("result_anomaly_detection.txt", 'a') as f:
            f.write(setting + "  \n")
            f.write(summary)
            f.write('\n\n')
        return

  