import numpy as np
import torch
import torch.nn as nn
import pandas as pd
from torch.utils.data import DataLoader
from models.GDN.datasets.TimeDataset import TimeDataset
from models.GDN.models.GDN import GDN
from models.GDN.util.env import get_device, set_device
from models.GDN.util.net_struct import get_feature_map, get_fc_graph_struc
from models.GDN.util.preprocess import build_loc_net, construct_data
from models.GDN.evaluate import get_full_err_scores, get_best_performance_data
from models.GDN.model_config import get_model_config


# Module-level state populated by init_model(); all None until it runs.
# NOTE(review): init_model() also creates the globals `model`, `feature_map`
# and `fc_edge_index`, which are not pre-declared here.
model_path = None    # filesystem path to the trained checkpoint
dataset = None       # dataset name from the model config
train_config = None  # hyper-parameter dict (slide_win, batch, topk, ...)
device = None        # torch device chosen via set_device()/get_device()

def init_model(model_name='msl'):
    """
    Initialize the global model configuration and load the trained GDN model.

    Args:
        model_name: name of the pretrained model configuration (default 'msl').

    Side effects:
        Populates the module-level globals ``model_path``, ``dataset``,
        ``train_config``, ``model``, ``feature_map``, ``fc_edge_index``
        and ``device``, and puts the loaded model into eval mode.
    """
    global model_path, dataset, train_config, model, feature_map, fc_edge_index, device

    # Resolve checkpoint path, dataset name and hyper-parameters by name.
    model_path, dataset, train_config = get_model_config(model_name)

    # Force CPU inference; get_device() returns what set_device() stored.
    env_config = {
        'dataset': dataset,
        'device': 'cpu'
    }
    set_device(env_config['device'])
    device = get_device()

    # Load the sensor/feature list and the fully-connected graph structure.
    feature_map = get_feature_map(dataset)
    fc_struc = get_fc_graph_struc(dataset)
    fc_edge_index = build_loc_net(fc_struc, list(feature_map), feature_map=feature_map)
    fc_edge_index = torch.tensor(fc_edge_index, dtype=torch.long)

    # Build the GDN model and restore the trained weights.
    model = GDN([fc_edge_index], len(feature_map),
                dim=train_config['dim'],
                input_dim=train_config['slide_win'],
                out_layer_num=train_config['out_layer_num'],
                out_layer_inter_dim=train_config['out_layer_inter_dim'],
                topk=train_config['topk']).to(device)
    # Fix: map_location lets GPU-trained checkpoints load on the CPU device
    # selected above; without it torch.load fails on CUDA-saved tensors.
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

# 测试函数
# Evaluation loop: runs the model over a dataloader and gathers outputs.
def test(model, dataloader):
    """
    Evaluate ``model`` over ``dataloader`` and collect predictions.

    Args:
        model: GDN model, invoked as ``model(x, edge_index)``.
        dataloader: yields (x, y, labels, edge_index) batches.

    Returns:
        Tuple ``(avg_loss, [predicted, ground_truth, labels])`` where the
        three entries are nested Python lists of shape
        (num_samples, num_features).

    Raises:
        ValueError: if ``dataloader`` yields no batches (the original code
        crashed with ZeroDivisionError in that case).
    """
    loss_func = nn.MSELoss(reduction='mean')
    loss_history = []

    # Accumulated tensors across batches; None until the first batch.
    preds = None
    grounds = None
    labels_acc = None

    model.eval()
    for x, y, labels, edge_index in dataloader:
        x, y, labels, edge_index = [item.to(device).float()
                                    for item in [x, y, labels, edge_index]]
        with torch.no_grad():
            predicted = model(x, edge_index).float().to(device)
            loss = loss_func(predicted, y)
            # Broadcast the per-sample label across the feature dimension so
            # labels line up with the (batch, features) prediction tensor.
            labels = labels.unsqueeze(1).repeat(1, predicted.shape[1])

            if preds is None:
                preds, grounds, labels_acc = predicted, y, labels
            else:
                preds = torch.cat((preds, predicted), dim=0)
                grounds = torch.cat((grounds, y), dim=0)
                labels_acc = torch.cat((labels_acc, labels), dim=0)

        loss_history.append(loss.item())

    if not loss_history:
        raise ValueError("dataloader produced no batches")

    avg_loss = sum(loss_history) / len(loss_history)
    return avg_loss, [preds.tolist(), grounds.tolist(), labels_acc.tolist()]


def predict(file_path):
    """
    Run anomaly detection on the CSV file at ``file_path``.

    If the CSV contains an 'attack' column it is used as ground truth to
    compute F1/precision/recall; otherwise the metrics are reported as "N/A"
    and no anomaly details are produced.

    Args:
        file_path: path to a CSV file with one column per monitored feature.

    Returns:
        dict with keys "F1 score", "precision", "recall" and
        "Anomaly details" (a list of {'time_point', 'features'} dicts).

    Raises:
        Exception: if the CSV file cannot be read.
    """
    try:
        test_data = pd.read_csv(file_path)
    except Exception as e:
        raise Exception(f"Error reading CSV file: {str(e)}")

    # Ground-truth labels come from the optional 'attack' column.
    has_attack_column = 'attack' in test_data.columns

    if has_attack_column:
        test_labels = test_data['attack'].tolist()
        test_data = test_data.drop(columns=['attack'])
    else:
        test_labels = [0] * len(test_data)

    # Preprocess into the (features, labels) layout TimeDataset expects.
    test_dataset_indata = construct_data(test_data, feature_map, labels=test_labels)

    cfg = {
        'slide_win': train_config['slide_win'],
        'slide_stride': train_config['slide_stride'],
    }
    test_dataset = TimeDataset(test_dataset_indata, fc_edge_index, mode='test', config=cfg)
    test_dataloader = DataLoader(test_dataset, batch_size=train_config['batch'],
                                 shuffle=False, num_workers=0)

    # Run inference: test_result = [predicted, ground_truth, labels].
    _, test_result = test(model, test_dataloader)

    # Metrics stay None (reported as "N/A") when no ground truth exists.
    f1_score = None
    precision = None
    recall = None
    # Fix: initialize unconditionally — the original only created this list
    # inside the has_attack_column branch, so a CSV without an 'attack'
    # column raised NameError at the result dict below.
    anomaly_details = []

    if has_attack_column:
        np_test_result = np.array(test_result)
        test_labels = np_test_result[2, :, 0].tolist()

        # NOTE(review): the original passes test_result as its own validation
        # split; kept as-is to preserve scoring behavior.
        test_scores, _ = get_full_err_scores(test_result, test_result)
        top1_best_info = get_best_performance_data(test_scores, test_labels, topk=1)

        f1_score = top1_best_info[0]
        precision = top1_best_info[1]
        recall = top1_best_info[2]

        # Aggregate the top-1 feature error score per time point and
        # threshold it to obtain predicted anomaly labels.
        total_features = test_scores.shape[0]
        topk_indices = np.argpartition(test_scores, range(total_features - 1, total_features), axis=0)[-1:]
        total_topk_err_scores = np.sum(np.take_along_axis(test_scores, topk_indices, axis=0), axis=0)
        threshold = top1_best_info[4]
        pred_labels = (total_topk_err_scores > threshold).astype(int)

        # For each predicted anomalous time point, report which features
        # look abnormal.
        for t in range(len(pred_labels)):
            if pred_labels[t] != 1:
                continue
            # Error scores of every feature at this time point.
            feature_scores = test_scores[:, t]
            # Features scoring more than 2 std above the mean are abnormal.
            feature_threshold = np.mean(feature_scores) + 2 * np.std(feature_scores)
            abnormal_feature_indices = np.where(feature_scores > feature_threshold)[0]

            if len(abnormal_feature_indices) == 0:
                # Fall back to the single highest-scoring feature.
                abnormal_feature_indices = np.argsort(feature_scores)[-1:]
            else:
                # Order abnormal features by descending score.
                abnormal_feature_indices = abnormal_feature_indices[
                    np.argsort(feature_scores[abnormal_feature_indices])[::-1]]

            anomaly_details.append({
                'time_point': t,
                'features': [{
                    'name': feature_map[idx] if idx < len(feature_map) else str(idx)
                } for idx in abnormal_feature_indices]
            })

    return {
        "F1 score": f1_score if f1_score is not None else "N/A",
        "precision": precision if precision is not None else "N/A",
        "recall": recall if recall is not None else "N/A",
        "Anomaly details": anomaly_details
    }


def detect_anomalies(model_name, file_path):
    """
    End-to-end entry point: initialize the named model, then run prediction
    on the given CSV file.

    Args:
        model_name: name of the pretrained model configuration.
        file_path: path to the CSV file to analyze.

    Returns:
        The result dict produced by predict().

    Raises:
        Exception: wrapping any error raised during initialization or
        prediction; the original exception is preserved via ``__cause__``.
    """
    try:
        # Initialize globals (model, config, graph) before predicting.
        init_model(model_name)
        return predict(file_path)
    except Exception as e:
        # Fix: chain the cause so the original traceback is not lost.
        raise Exception(f"检测过程出错: {str(e)}") from e