"""
系统级别偏差 (System-Level Deviation, SLD) 和
实例级别偏差 (Instance-Level Deviation, ILD)

本模块实现了基于ART模型的偏差计算方法，用于时序异常检测和根因分析。
"""

import ast

import numpy as np
import pandas as pd
import torch
import torch.nn as nn

from .layer import create_simple_dataloader_AR


def extract_sld_ild_tensors(model, test_samples, top_k_value=3):
    """
    Extract tensor representations of both the system-level deviation (SLD)
    and the instance-level deviation (ILD) in a single pass.

    Following the method described in the paper:
    1. Compute the deviation vector of every instance.
    2. Take the L1 norm of each instance's deviation vector.
    3. Keep the top-k instances with the largest L1 norms.
    4. Use the (softmax-normalized) L1 norms of those instances as weights
       to aggregate their deviations into the SLD.

    Args:
        model: trained ART model.
        test_samples: list of test samples.
        top_k_value: number of top instances to select.

    Returns:
        tuple: (sld_tensor, ild_tensor)
            - sld_tensor: numpy array of shape [win_size, 1, channel_dim]
            - ild_tensor: numpy array of shape [win_size, instance_num, channel_dim]
    """
    criterion = nn.MSELoss(reduction="none")
    model.eval()

    sld_chunks = []
    ild_chunks = []

    loader = create_simple_dataloader_AR(test_samples, batch_size=128, shuffle=False)

    with torch.no_grad():
        for batch_ts, batched_graphs, batched_feats, batched_targets in loader:
            # Per-instance, per-channel deviation of the model prediction.
            z, h = model(batched_graphs, batched_feats)
            deviation = criterion(h, batched_targets)  # [batch, instance, channel]

            # The raw deviations are the ILD.
            ild_chunks.append(deviation.cpu().numpy())

            # L1 norm of every instance's deviation (sum over channels).
            norms = deviation.abs().sum(dim=-1)  # [batch, instance]

            # Indices of the k instances with the largest norms.
            k = min(top_k_value, norms.size(1))
            top_indices = torch.topk(norms, k=k, dim=1).indices

            # Softmax-normalized norms of the selected instances act as weights.
            weights = torch.softmax(torch.gather(norms, 1, top_indices), dim=1)

            # Collect the deviation vectors of the selected instances.
            picked = torch.stack(
                [deviation[row, top_indices[row]] for row in range(deviation.size(0))]
            )  # [batch, k, channel]

            # Weighted sum over the selected instances yields the SLD.
            weighted = torch.sum(
                picked * weights.unsqueeze(-1), dim=1, keepdim=True
            )  # [batch, 1, channel]
            sld_chunks.append(weighted.cpu().numpy())

    # Merge the per-batch results.
    sld_tensor = np.concatenate(sld_chunks, axis=0)
    ild_tensor = np.concatenate(ild_chunks, axis=0)

    return sld_tensor, ild_tensor


def SLD(model, test_samples, method="num", t_value=3):
    """
    计算系统级别偏差（System-Level Deviation）
    
    该函数通过比较模型预测值与实际值之间的偏差，计算系统中各组件的异常程度。
    支持两种方法：基于数量的'num'方法和基于概率的'prob'方法。
    
    参数:
        model: 训练好的ART模型
        test_samples: 测试样本列表，格式为[(timestamp, graph, features, targets), ...]
        method: 偏差计算方法，'num'表示选择固定数量的top组件，'prob'表示基于概率阈值选择
        t_value: 对于'num'方法，表示选择的top-k组件数量；对于'prob'方法，表示概率阈值
        
    返回:
        pandas.DataFrame: 包含各时间点各组件系统级偏差的数据框，列为组件ID，行索引为时间戳
    """
    mse = nn.MSELoss(reduction="none")  # 使用不降维的MSE损失函数
    system_level_deviation_df = pd.DataFrame()
    dataloader = create_simple_dataloader_AR(test_samples, batch_size=128, shuffle=False)
    model.eval()  # 设置模型为评估模式
    
    with torch.no_grad():  # 不计算梯度
        for batch_ts, batched_graphs, batched_feats, batched_targets in dataloader:
            z, h = model(batched_graphs, batched_feats)  # 获取模型预测结果
            loss = mse(h, batched_targets)  # 计算每个维度的损失，形状为[batch_size, instance_num, channel_dim]
            
            if method == "prob":  # 基于概率阈值的方法
                # 计算每个实例的最大和最小总损失，用于归一化
                max = torch.max(torch.sum(loss, dim=-1), dim=-1).values.unsqueeze(dim=-1)
                min = torch.min(torch.sum(loss, dim=-1), dim=-1).values.unsqueeze(dim=-1)
                
                # 计算归一化后的概率分布
                root_prob = torch.softmax(
                    (torch.sum(loss, dim=-1) - min) / (max - min), dim=-1
                )
                
                # 按概率降序排序
                sorted_indices = torch.argsort(root_prob, dim=1, descending=True)
                root_prob = torch.gather(root_prob, 1, sorted_indices)
                loss = torch.gather(
                    loss, 1, sorted_indices.unsqueeze(-1).expand(-1, -1, loss.size(-1))
                )
                
                # 计算累积概率和
                cumulative_sum = torch.cumsum(root_prob, dim=1)

                # 重新计算softmax概率（注：这部分代码有重复，可能是实现细节）
                root_prob = torch.softmax(torch.sum(loss, dim=-1), dim=-1)
                sorted_indices = torch.argsort(root_prob, dim=1, descending=True)
                root_prob = torch.gather(root_prob, 1, sorted_indices)
                loss = torch.gather(
                    loss, 1, sorted_indices.unsqueeze(-1).expand(-1, -1, loss.size(-1))
                )
                cumulative_sum = torch.cumsum(root_prob, dim=1)

                # 找到累积概率首次超过t_value的索引位置
                t_value_indices = torch.argmax(
                    (cumulative_sum > t_value).to(torch.int), dim=1
                )
                
                # 创建选择掩码
                selected_indices = torch.zeros_like(loss)
                for i in range(root_prob.shape[0]):
                    selected_indices[
                        i,
                        : t_value_indices[i] + 1,
                    ] = 1
                    
                # 计算系统级偏差
                system_level_deviation = torch.sum(selected_indices * loss, dim=1)
                
            elif method == "num":  # 基于数量的方法
                # 计算每个实例的总损失
                instance_deviation = torch.sum(loss, dim=-1)
                
                # 选择top-k个偏差最大的实例
                topk_values, topk_indices = torch.topk(
                    instance_deviation, k=t_value, dim=-1
                )
                
                # 创建掩码，只保留top-k实例
                mask = torch.zeros_like(instance_deviation)
                mask = mask.scatter_(1, topk_indices, 1).unsqueeze(-1)
                
                # 计算系统级偏差
                system_level_deviation = torch.sum(loss * mask, dim=1)

            # 将结果转换为DataFrame
            tmp_df = pd.DataFrame(system_level_deviation.detach().numpy())
            tmp_df["timestamp"] = batch_ts
            system_level_deviation_df = pd.concat([system_level_deviation_df, tmp_df])
            
    return system_level_deviation_df.reset_index(drop=True)


def ILD(model, test_samples):
    """
    Compute the Instance-Level Deviation (ILD).

    For every instance (node) the full per-channel deviation vector is kept
    and serialized as a string so it fits into a single DataFrame cell.

    Args:
        model: trained ART model.
        test_samples: list of samples [(timestamp, graph, features, targets), ...].

    Returns:
        pandas.DataFrame: one row per timestamp, one column per instance; each
        cell holds the stringified deviation vector, plus a 'timestamp' column.
    """
    criterion = nn.MSELoss(reduction="none")  # per-element MSE loss
    result = pd.DataFrame()
    loader = create_simple_dataloader_AR(test_samples, batch_size=128, shuffle=False)
    model.eval()

    with torch.no_grad():
        for batch_ts, batched_graphs, batched_feats, batched_targets in loader:
            # Per-channel deviation of the model prediction.
            z, h = model(batched_graphs, batched_feats)
            deviation = criterion(h, batched_targets)
            n_batch, n_instance, n_channel = deviation.shape

            # Serialize each instance's channel-wise deviation into a string cell.
            cells = np.array(
                [str(vec.tolist()) for vec in deviation.reshape(-1, n_channel)]
            ).reshape(n_batch, n_instance)

            chunk = pd.DataFrame(cells)
            chunk["timestamp"] = batch_ts
            result = pd.concat([result, chunk])

    return result.reset_index(drop=True)


def aggregate_instance_representations(
    cases, instance_level_deviation_df, before=60, after=300
):
    """
    Aggregate instance representations by collecting the ILD rows that fall
    inside a time window around each case.

    No mean/max aggregation is applied: every row in the window is kept,
    paired with the instance's column name.

    Args:
        cases: DataFrame of cases; its 'timestamp' column marks failure times.
        instance_level_deviation_df: DataFrame returned by ILD().
        before: window size before the failure (seconds).
        after: window size after the failure (seconds).

    Returns:
        list: one entry per case; each entry is a list of
        (instance_column_name, deviation_vector) tuples.
    """
    instance_representations = []

    for _, case in cases.iterrows():
        instance_representation = []

        # Keep only rows inside [timestamp - before, timestamp + after).
        window = instance_level_deviation_df[
            (instance_level_deviation_df["timestamp"] >= (case["timestamp"] - before))
            & (instance_level_deviation_df["timestamp"] < (case["timestamp"] + after))
        ]

        for col_name, col_data in window.items():
            if col_name == "timestamp":
                continue

            # Cells are stringified lists (see ILD). ast.literal_eval parses
            # them safely — unlike the original eval(), it cannot execute
            # arbitrary code embedded in a cell.
            instance_representation.extend(
                (col_name, ast.literal_eval(item)) for item in col_data
            )

        instance_representations.append(instance_representation)

    return instance_representations


def aggregate_failure_representations(
    cases, system_level_deviation_df, type_hash=None, before=60, after=300
):
    """
    Aggregate failure representations by averaging the SLD rows inside a time
    window around each case, grouped by failure type.

    Args:
        cases: DataFrame of cases; its 'timestamp' column marks failure times.
        system_level_deviation_df: DataFrame returned by SLD().
        type_hash: optional mapping from case timestamp to failure type.
        before: window size before the failure (seconds).
        after: window size after the failure (seconds).

    Returns:
        dict: failure type -> mean representation vector over that type's cases.
    """
    grouped = {}

    for _, case in cases.iterrows():
        # Resolve the failure type; 'all' when no mapping is supplied,
        # 'unknown' when the timestamp is missing from the mapping.
        if type_hash is None:
            failure_type = "all"
        else:
            failure_type = type_hash.get(case["timestamp"], "unknown")

        # Rows inside [timestamp - before, timestamp + after).
        lower = case["timestamp"] - before
        upper = case["timestamp"] + after
        in_window = (system_level_deviation_df["timestamp"] >= lower) & (
            system_level_deviation_df["timestamp"] < upper
        )
        window = system_level_deviation_df[in_window]

        # Skip cases with no data in their window.
        if window.empty:
            continue

        # Mean deviation over the whole window (timestamp column excluded).
        # (Alternative: take the row with the maximum total deviation.)
        values = window.drop(columns=["timestamp"]).to_numpy()
        grouped.setdefault(failure_type, []).append(np.mean(values, axis=0))

    # Average the per-case vectors of each failure type.
    return {
        failure_type: np.mean(vectors, axis=0)
        for failure_type, vectors in grouped.items()
    }


# Usage examples (SLD/aggregate_failure_representations return a single value):
# failure_representations = aggregate_failure_representations(cases, SLD(model, test_samples, 'num', 3), type_hash, before, after)
# failure_representations = aggregate_failure_representations(cases, SLD(model, test_samples, 'prob', 0.9), type_hash, before, after)
# instance_representations = aggregate_instance_representations(cases, ILD(model, test_samples), before, after)
