from functools import cache
from re import sub
from matplotlib import pyplot as plt
import test
import torch
import torch.nn as nn
from torch import optim
import os
import time
import warnings
import numpy as np  
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.manifold import TSNE

from exp.exp_anomaly_detection import Exp_Anomaly_Detection
from utils.tools import EarlyStopping, adjust_learning_rate, adjustment
from ext.exp.utils import report


class Exp_Anomaly_Detection_v2(Exp_Anomaly_Detection):
    """Anomaly-detection experiment variant that adds cluster-based
    per-cluster thresholding on top of the base reconstruction-energy flow."""

    def __init__(self, args):
        super().__init__(args)

    def vali(self, vali_data, vali_loader, criterion):
        """Run one validation pass and return the mean reconstruction loss.

        Args:
            vali_data: validation dataset (unused; kept for interface parity
                with the base class).
            vali_loader: DataLoader yielding ``(batch_x, label)`` pairs.
            criterion: loss function applied to ``(prediction, target)``.

        Returns:
            float: average criterion loss over all validation batches.
        """
        total_loss = []
        self.model.eval()
        with torch.no_grad():
            for i, (batch_x, _) in enumerate(vali_loader):
                batch_x = batch_x.float().to(self.device)

                outputs, _ = self.model(batch_x, None, None, None)

                # Select which slice of the output is scored, depending on the
                # configured feature mode.
                if self.args.features == 'MS':
                    f_dim = -1
                    outputs = outputs[:, :, f_dim:]
                elif self.args.features == 'X':  # extension: score only the last time step
                    outputs = outputs[:, -1:, :]
                    batch_x = batch_x[:, -1:, :]
                else:
                    f_dim = 0
                    outputs = outputs[:, :, f_dim:]

                pred = outputs.detach().cpu()
                true = batch_x.detach().cpu()

                # .item() keeps total_loss a plain list of floats rather than
                # 0-dim tensors, so np.average below is unambiguous.
                loss = criterion(pred, true)
                total_loss.append(loss.item())
        total_loss = np.average(total_loss)
        self.model.train()
        return total_loss

    def _process_data(self, setting, load_model=True):
        """Compute per-timestep anomaly energies and embeddings for train/test sets.

        Args:
            setting: experiment tag used to locate the checkpoint directory.
            load_model: whether to restore model weights from the checkpoint.
                NOTE(review): the original code gated this on ``if test:``, which
                referenced the module imported as ``import test`` (always truthy),
                so the checkpoint was in fact always loaded; ``load_model=True``
                preserves that effective behavior while removing the accidental
                module reference.

        Returns:
            tuple: ``(train_energy, test_energy, train_emb, test_emb, test_labels)``
            where the energies are flat per-timestep MSE arrays, the embeddings
            are ``(n, emb_dim)`` arrays, and ``test_labels`` is the list of raw
            label batches from the test loader.
        """
        test_data, test_loader = self._get_data(flag='test')
        train_data, train_loader = self._get_data(flag='train')
        if load_model:
            print('loading model')
            self.model.load_state_dict(torch.load(os.path.join('./checkpoints/' + setting, 'checkpoint.pth')))

        attens_energy = []
        emb_features = []

        self.model.eval()
        # reduction='none' keeps the per-element loss; the legacy
        # `reduce=False` spelling is deprecated in current PyTorch.
        self.anomaly_criterion = nn.MSELoss(reduction='none')

        # (1) statistics on the train set
        with torch.no_grad():
            for i, (batch_x, batch_y) in enumerate(train_loader):
                batch_x = batch_x.float().to(self.device)
                # reconstruction
                outputs, emb = self.model(batch_x, None, None, None)
                # per-timestep energy: MSE averaged over the feature dimension
                score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)
                score = score.detach().cpu().numpy()
                attens_energy.append(score)
                emb_features.append(emb.detach().cpu().numpy())

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        train_energy = np.array(attens_energy)
        emb_dim = emb_features[0].shape[-1]
        emb_features = np.concatenate(emb_features, axis=0).reshape(-1, emb_dim)
        train_emb = np.array(emb_features)

        # (2) energies on the test set (used later to find the threshold)
        attens_energy = []
        test_labels = []
        emb_features = []
        # Inference only: no_grad avoids building autograd graphs here too.
        with torch.no_grad():
            for i, (batch_x, batch_y) in enumerate(test_loader):
                batch_x = batch_x.float().to(self.device)
                # reconstruction
                outputs, emb = self.model(batch_x, None, None, None)
                # criterion
                score = torch.mean(self.anomaly_criterion(batch_x, outputs), dim=-1)
                score = score.detach().cpu().numpy()
                attens_energy.append(score)
                test_labels.append(batch_y)
                emb_features.append(emb.detach().cpu().numpy())

        attens_energy = np.concatenate(attens_energy, axis=0).reshape(-1)
        test_energy = np.array(attens_energy)
        emb_features = np.concatenate(emb_features, axis=0).reshape(-1, emb_dim)
        test_emb = np.array(emb_features)
        return train_energy, test_energy, train_emb, test_emb, test_labels

    def test(self, setting, test=0):
        """Evaluate anomaly detection on the test set and log metrics.

        Args:
            setting: experiment tag; names the results/checkpoint directories.
            test: kept for signature compatibility with the base class.
        """
        folder_path = './test_results/' + setting + '/'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

        # Reuse a cached copy of energies/embeddings when one exists, so
        # re-runs skip the expensive model forward passes.
        cache_path = os.path.join(folder_path, 'cached.npy')
        if not os.path.exists(cache_path):
            train_energy, test_energy, train_emb, test_emb, test_labels = self._process_data(setting)
            cache_data = {
                'train_energy': train_energy,
                'test_energy': test_energy,
                'train_emb': train_emb,
                'test_emb': test_emb,
                'test_labels': test_labels,
            }
            np.save(cache_path, cache_data)
        else:
            cache_data = np.load(cache_path, allow_pickle=True).item()

            train_energy = cache_data['train_energy']
            test_energy = cache_data['test_energy']
            train_emb = cache_data['train_emb']
            test_emb = cache_data['test_emb']
            test_labels = cache_data['test_labels']

        # (3) evaluation on the test set
        test_labels = np.concatenate(test_labels, axis=0).reshape(-1)
        test_labels = np.array(test_labels)
        gt = test_labels.astype(int)
        print("gt:     ", gt.shape)

        # Cluster-based per-cluster thresholding replaces the base class's
        # single global percentile threshold.
        pred = self.cluster_based_anomaly_detection(
           folder_path, train_emb, test_emb, train_energy, test_energy, gt
        )
        print("pred:   ", pred.shape)

        self._visualize_emb(folder_path, train_emb, test_emb,
                            train_labels=np.zeros(len(train_emb)),
                            test_labels=test_labels)

        # Keep a copy of the raw predictions before point adjustment.
        raw_pred = np.array(pred)

        # (4) detection adjustment
        gt, pred = adjustment(gt, pred)

        pred = np.array(pred)
        gt = np.array(gt)
        print("pred: ", pred.shape)
        print("gt:   ", gt.shape)

        # Extra reporting hook (saves detailed per-sample results).
        report(folder_path, gt, pred, raw_pred, test_energy)

        # (5) evaluation
        auc_score = roc_auc_score(gt, test_energy)
        auc_score = max(auc_score, 1 - auc_score)
        accuracy = accuracy_score(gt, pred)
        precision, recall, f_score, support = precision_recall_fscore_support(gt, pred, average='binary')
        # Build the summary line once; it is both printed and appended to file.
        result_line = "Accuracy : {:0.4f}, Precision : {:0.4f}, Recall : {:0.4f}, F-score : {:0.4f}, AUC: {:0.4f} ".format(
            accuracy, precision,
            recall, f_score, auc_score)
        print(result_line)

        # Context manager guarantees the result file is closed even on error.
        with open("result_anomaly_detection.txt", 'a') as f:
            f.write(setting + "  \n")
            f.write(result_line)
            f.write('\n')
            f.write('\n')
        return

    #     return pred

    # Original method from the base implementation (kept commented for reference):
    # def cluster_based_anomaly_detection(self, folder_path, train_emb, test_emb, train_energy, test_energy, gt):

    #     combined_energy = np.concatenate([train_energy, test_energy], axis=0)
    #     threshold = np.percentile(combined_energy, 100 - self.args.anomaly_ratio)
    #     print("Threshold :", threshold)
    #     pred = (test_energy > threshold).astype(int)
    #     K = 10
    #     cluster = self._analysis(K, train_emb, test_emb, train_energy, test_energy, gt)
    #     self._draw(folder_path, cluster, threshold)

    #     return pred

    def cluster_based_anomaly_detection(self, folder_path, train_emb, test_emb, train_energy, test_energy, gt):
        """Predict anomalies with a per-cluster threshold instead of one global one.

        Clusters the embeddings into K groups, splits the clusters into a
        "normal-ish" and an "anomaly-heavy" group by 2-means over their
        threshold/error statistics, then applies one of three percentile
        thresholds from ``self.args.anomaly_ratio_list`` per cluster.

        Args:
            folder_path: directory for the diagnostic scatter plot.
            train_emb / test_emb: (n, d) embedding matrices.
            train_energy / test_energy: per-sample reconstruction energies.
            gt: ground-truth test labels (used only for cluster statistics).

        Returns:
            np.ndarray: 0/1 prediction per test sample.
        """
        combined_energy = np.concatenate([train_energy, test_energy], axis=0)
        # Global percentile threshold, kept as a reference point for
        # classifying clusters below (not applied to samples directly).
        threshold = np.percentile(combined_energy, 100 - self.args.anomaly_ratio)
        print("Threshold :", threshold)
        # pred = (test_energy > threshold).astype(int)
        pred = np.zeros(len(test_energy)).astype(int)

        K = 10
        cluster, train_labels, test_labels = self._analysis(K, train_emb, test_emb, train_energy, test_energy, gt)

        self._draw(folder_path, cluster, threshold, pic_filename='scatter_plot.png')

        c_data = []
        for i in range(K):
            c_data.append([cluster[i]['threshold'], cluster[i]['err_train_mean'], cluster[i]['err_train_std'],])

        # Some clusters may consist largely of anomalous samples; split the
        # clusters into two groups by their threshold/error statistics.
        kmeans_threh = KMeans(n_clusters=2, random_state=0).fit(c_data)
        # The group whose centroid has the lower threshold is the "normal" one.
        normal_cluster_label = np.argmin(kmeans_threh.cluster_centers_[:,0])

        for i in range(K):
            if kmeans_threh.labels_[i] == normal_cluster_label:
                if cluster[i]['threshold'] < threshold: # cluster is most likely all normal samples
                    if self.args.anomaly_ratio_list[0] == 0:
                        # ratio 0 means "flag nothing" in this cluster
                        print(f"cluster: {i} Threshold: {cluster[i]['threshold']:.4f} => +Inf ") 
                    else:
                        thresh = np.percentile(test_energy[test_labels == i], 100 - self.args.anomaly_ratio_list[0])
                        pred[test_labels == i] = (test_energy[test_labels == i] > thresh).astype(int)
                        print(f"cluster: {i} Threshold: {cluster[i]['threshold']:.4f} => {thresh:.4f}") 
                else: # mixed cluster: contains anomalies but is mostly normal
                    thresh = np.percentile(test_energy[test_labels == i], 100 - self.args.anomaly_ratio_list[1])
                    pred[test_labels == i] = (test_energy[test_labels == i] > thresh).astype(int)
                    print(f"cluster: {i} Threshold: {cluster[i]['threshold']:.4f} => {thresh:.4f}") 
            else:  # mixed cluster with many anomalies: use the most aggressive threshold
                thresh = np.percentile(test_energy[test_labels == i], 100 - self.args.anomaly_ratio_list[2])
                pred[test_labels == i] = (test_energy[test_labels == i] > thresh).astype(int)
                print(f"cluster: {i} Threshold: {cluster[i]['threshold']:.4f} => {thresh:.4f}")

                # # Experiment: treat this cluster as a new sub-dataset and
                # # analyze it further; finer splitting did not improve results.
                # train_emb_sub = train_emb[train_labels == i]
                # train_energy_sub = train_energy[train_labels == i]
                # test_emb_sub = test_emb[test_labels == i]
                # test_energy_sub = test_energy[test_labels == i]
                # gt_sub = gt[test_labels == i]
                # sub_pred = self._deep_step_analysis(
                #     folder_path, thresh, K, i, train_emb_sub, test_emb_sub, train_energy_sub, test_energy_sub, gt_sub
                # )
                # pred[test_labels == i] = sub_pred

        return pred

    def _analysis(self, K, train_emb, test_emb, train_energy, test_energy, gt):
        """Cluster embeddings with K-means and collect per-cluster statistics.

        Args:
            K: number of clusters.
            train_emb / test_emb: (n, d) embedding matrices.
            train_energy / test_energy: per-sample reconstruction energies.
            gt: ground-truth anomaly labels for the test samples.

        Returns:
            tuple: ``(cluster, train_labels, test_labels)`` where ``cluster``
            maps cluster id -> statistics dict and the label arrays hold each
            sample's cluster assignment.
        """
        # Cluster the embedding features (MiniBatch variant for speed).
        kmeans = MiniBatchKMeans(n_clusters=K, random_state=0)
        train_labels = kmeans.fit_predict(train_emb)
        test_labels = kmeans.predict(test_emb)

        all_label = np.concatenate([train_labels, test_labels], axis=0)

        # Per-cluster threshold and summary statistics.
        cluster = {}
        for i in range(K):
            cluster_energy = np.concatenate([train_energy[train_labels == i], test_energy[test_labels == i]], axis=0) 

            # BUGFIX: the denominator was previously len(test_labels == i),
            # i.e. the length of the whole boolean mask, which grossly
            # underestimated the failure ratio. Use the cluster's actual
            # test-sample count (guarded against empty clusters).
            cluster_test_size = int(np.sum(test_labels == i))
            failure_ratio = float(np.sum(gt[test_labels == i])) / cluster_test_size if cluster_test_size else 0.0

            cluster_size = sum(all_label == i)
            cluster_train_size = sum(train_labels == i)

            std_train, mean_train = np.std(train_energy[train_labels==i]), np.mean(train_energy[train_labels==i])
            std_test, mean_test = np.std(test_energy[test_labels==i]), np.mean(test_energy[test_labels==i])

            cluster[i] = {
                'cluster_size': cluster_size,
                'train_ratio': cluster_train_size / cluster_size,
                'threshold': np.percentile(cluster_energy, 100 - self.args.anomaly_ratio),
                "failure_ratio_test": failure_ratio,

                'err_train_std': std_train,
                'err_train_mean': mean_train,

                'err_test_std': std_test,
                'err_test_mean': mean_test,
                # mean absolute activation, a rough "center magnitude" per cluster
                'fea_train_center': np.mean(np.abs(train_emb[train_labels==i])),
                'fea_test_center': np.mean(np.abs(test_emb[test_labels==i])),
            }
        return cluster, train_labels, test_labels

    def _draw(self, folder_path, cluster, threshold, pic_filename):
        """Dump per-cluster stats to CSV and plot log-scale error means per cluster.

        Args:
            folder_path: output directory for the CSV and the image.
            cluster: dict of cluster-id -> statistics (see ``_analysis``).
            threshold: global energy threshold, drawn as a dashed reference line.
            pic_filename: file name for the saved scatter plot.
        """
        frame = pd.DataFrame.from_dict(cluster).transpose().reset_index(drop=True)
        frame.to_csv(os.path.join(folder_path, 'cluster_output_scaled.csv'),
                     index=False, float_format='%.4f', encoding='utf-8')

        cluster_ids = frame.index.tolist()
        fig = plt.figure(figsize=(10, 6))
        # Marker size encodes the energy std within each cluster.
        plt.scatter(cluster_ids, np.log(frame['err_train_mean']), c=frame.index, cmap='viridis', marker='o',
                    alpha=0.7, s=10 * frame['err_train_std'], label='train')
        plt.scatter(cluster_ids, np.log(frame['err_test_mean']), c=frame.index, cmap='viridis', marker='v',
                    alpha=0.7, s=10 * frame['err_test_std'], label='test')
        plt.scatter(cluster_ids, np.log(frame['threshold']), c='r', marker='x',
                    alpha=0.7, s=20, label='cluster threshold')

        # Global threshold as a horizontal reference on the same log scale.
        plt.axhline(y=np.log(threshold), color='r', linestyle='--', label='Threshold')
        plt.legend()

        plt.xlabel('Cluster ID')
        plt.ylabel('log(err_mean)')
        plt.title('err_mean scatter')

        plt.grid(True)
        plt.savefig(os.path.join(folder_path, pic_filename), dpi=300, bbox_inches='tight')
        plt.close()

    def _deep_step_analysis(self, folder_path, thresh, K, i, 
                            train_emb_sub, test_emb_sub, train_energy_sub, test_energy_sub, gt_sub):
        """Experimental second-level analysis: re-cluster one anomaly-heavy cluster.

        Splits cluster ``i``'s samples into K sub-clusters, then flags test
        samples only in sub-clusters whose mean train energy already exceeds
        ``thresh``; the rest are left at 0 (treated as normal).

        Args:
            folder_path: directory for the sub-cluster scatter plot.
            thresh: parent cluster's threshold, reused for sub-cluster scoring.
            K: number of sub-clusters.
            i: id of the parent cluster (for logging only).
            *_sub: embeddings / energies / ground truth restricted to cluster i.

        Returns:
            np.ndarray: 0/1 prediction per test sample of the sub-set.
        """
        sub_cluster, train_labels_sub, test_labels_sub = self._analysis(
            K, train_emb_sub, test_emb_sub, train_energy_sub, test_energy_sub, gt_sub
        )
        self._draw(
            folder_path, sub_cluster, thresh, pic_filename="subcluster_scatter_plot.png"
        )
        # s_data = []
        # for j in range(K):
        #     s_data.append([sub_cluster[j]['threshold']])
        # sub_kmeans_threh = KMeans(n_clusters=2, random_state=0).fit(s_data)
        # failure_cluster_label = np.argmax(sub_kmeans_threh.cluster_centers_[:,0])

        sub_pred = np.zeros(len(test_energy_sub)).astype(int)
        for j in range(K):
            if sub_cluster[j]['err_train_mean'] >= thresh:
                # Sub-cluster already looks anomalous on the train side:
                # apply the parent threshold to its test samples.
                sub_pred[test_labels_sub == j] = (
                    test_energy_sub[test_labels_sub == j] > thresh
                ).astype(int)
                print(
                    f"cluster: {i} - {j} Threshold: {sub_cluster[j]['threshold']:.4f} => {thresh:.4f}"
                )
            else:
                # Otherwise treat the whole sub-cluster as normal
                # (effectively an infinite threshold).
                # thresh2 = np.percentile(test_energy_sub[test_labels_sub == j], 100 - 1)
                # sub_pred[test_labels_sub == j] = (test_energy_sub[test_labels_sub == j] > thresh2).astype(int)
                thresh2 = "+Inf"
                print(
                    f"cluster: {i} - {j} Threshold: {sub_cluster[j]['threshold']:.4f} => {thresh2} "
                )
        return sub_pred

    def _visualize_emb(self, folder_path, train_emb, test_emb, train_labels, test_labels):
        """Save a t-SNE scatter plot of train/test embeddings, colored by label.

        Args:
            folder_path: output directory for ``tsne_embeddings.png``.
            train_emb / test_emb: (n, d) embedding matrices.
            train_labels: labels for train samples (callers pass zeros).
            test_labels: 0/1 anomaly labels for test samples.
        """
        # Cap the number of points per split so t-SNE stays tractable.
        m = 20000
        train_emb = train_emb[:m]
        test_emb = test_emb[:m]
        train_labels = train_labels[:m]
        # Copy before relabeling: a plain [:m] slice is a numpy *view*, so
        # writing into it would silently mutate the caller's array.
        test_labels = np.array(test_labels[:m])
        # Remap test normals 0 -> 2 so the three groups are distinguishable:
        # 0 = train normal, 1 = test anomaly, 2 = test normal.
        test_labels[test_labels == 0] = 2

        tsne = TSNE(n_components=2, random_state=0)
        combined_emb = np.concatenate([train_emb, test_emb], axis=0)
        combined_labels = np.concatenate([train_labels, test_labels], axis=0)
        emb_2d = tsne.fit_transform(combined_emb)

        # Draw order matters for the legend: train normal (yellow),
        # test normal (blue), test anomaly (red, on top).
        plt.figure(figsize=(10, 8))
        scatter = plt.scatter(emb_2d[combined_labels==0][:, 0], emb_2d[combined_labels==0][:, 1], c='y', alpha=0.7)
        scatter = plt.scatter(emb_2d[combined_labels==2][:, 0], emb_2d[combined_labels==2][:, 1], c='b', alpha=0.7)
        scatter = plt.scatter(emb_2d[combined_labels==1][:, 0], emb_2d[combined_labels==1][:, 1], c='r', alpha=0.7)

        plt.colorbar(scatter)
        plt.title('t-SNE Visualization of Embeddings')
        plt.xlabel('Dimension 1')
        plt.ylabel('Dimension 2')
        plt.legend(['Train Normal', 'Test Normal', 'Test Anomaly'], title='Labels')
        plt.grid(True)
        plt.savefig(os.path.join(folder_path, 'tsne_embeddings.png'), dpi=300, bbox_inches='tight')
        plt.close()