import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import matplotlib.pyplot as plt
from tqdm import tqdm  # 进度条
# from torchsummary import summary
import pandas as pd
import numpy as np

from wholeModel import CombinedModel

import json

class SensorDataset(Dataset):
    """Sliding-window sensor dataset with per-window attack labels.

    Each item is a tuple ``(seq_x, seq_y, attack_label_seq)``:
        seq_x:            [seq_length, num_sensors]   input window
        seq_y:            [label_length, num_sensors] target window following seq_x
        attack_label_seq: [label_length]              'Attack LABEL' values aligned with seq_y
    """

    def __init__(self, list_file, csv_file, seq_length, label_length=20, step=1, device='cpu'):
        """
        Args:
            list_file (str): Path to the list.txt file containing sensor headers, one per line.
            csv_file (str): Path to the CSV file containing sensor data.
            seq_length (int): Length of each input sequence (seq_x).
            label_length (int): Length of each label sequence (seq_y).
            step (int): Stride between consecutive window start indices.
            device (str or torch.device): Device on which item tensors are created.

        Raises:
            ValueError: If a listed sensor column or the 'Attack LABEL' column is missing.
        """
        # Read sensor column names from the list file.
        with open(list_file, 'r') as f:
            self.headers = f.read().splitlines()

        self.df = pd.read_csv(csv_file)

        # Verify every requested sensor column exists in the CSV.
        missing_headers = [header for header in self.headers if header not in self.df.columns]
        if missing_headers:
            raise ValueError(f"以下传感器在CSV中未找到: {missing_headers}")

        # The ground-truth attack label column is mandatory.
        if 'Attack LABEL' not in self.df.columns:
            raise ValueError("CSV 文件中未找到 'Attack LABEL' 列")

        self.sensor_data = self.df[self.headers].values  # [num_samples, num_sensors]
        self.attack_labels = self.df['Attack LABEL'].values  # [num_samples]

        # Fill missing sensor readings with 0 (adjust the policy here if needed).
        if np.isnan(self.sensor_data).any():
            self.sensor_data = np.nan_to_num(self.sensor_data, nan=0.0)

        self.num_sensors = len(self.headers)
        self.seq_length = seq_length
        self.label_length = label_length
        self.step = step
        self.device = device

        # Precompute all (input, target, label) windows.
        # FIX 1 (off-by-one): the original loop bound lacked '+ 1', which
        # silently dropped the last valid window whose label ends exactly at
        # the final row.
        # FIX 2 (ragged array): the original stored (seq_x, seq_y) pairs in a
        # single np.array; when seq_length != label_length that is an
        # inhomogeneous (ragged) array, which NumPy >= 1.24 rejects. Inputs
        # and targets are therefore kept in separate, homogeneous arrays.
        xs, ys, labels = [], [], []
        for i in range(0, self.sensor_data.shape[0] - seq_length - label_length + 1, step):
            xs.append(self.sensor_data[i:i + seq_length])                                    # [seq_length, num_sensors]
            ys.append(self.sensor_data[i + seq_length:i + seq_length + label_length])        # [label_length, num_sensors]
            labels.append(self.attack_labels[i + seq_length:i + seq_length + label_length])  # [label_length]

        self.seq_x_data = np.array(xs)             # [num_sequences, seq_length, num_sensors]
        self.seq_y_data = np.array(ys)             # [num_sequences, label_length, num_sensors]
        self.attack_labels_seq = np.array(labels)  # [num_sequences, label_length]
        self.num_sequences = len(self.seq_x_data)

    def __len__(self):
        return self.num_sequences

    def __getitem__(self, idx):
        # Materialize one window as float32 tensors on the configured device.
        seq_x = torch.tensor(self.seq_x_data[idx], dtype=torch.float32, device=self.device)
        seq_y = torch.tensor(self.seq_y_data[idx], dtype=torch.float32, device=self.device)
        attack_label_seq = torch.tensor(self.attack_labels_seq[idx], dtype=torch.float32, device=self.device)
        return seq_x, seq_y, attack_label_seq

def collate_fn(batch):
    """Stack a list of (seq_x, seq_y, attack_label_seq) samples into batch tensors.

    Args:
        batch (list): List of (seq_x, seq_y, attack_label_seq) tuples.

    Returns:
        tuple of torch.Tensor:
            batch_x             [batch_size, seq_length, num_sensors]
            batch_y             [batch_size, label_length, num_sensors]
            batch_attack_labels [batch_size, label_length]
    """
    # Transpose the list of sample tuples into per-field groups, then stack
    # each group along a new leading batch dimension.
    return tuple(torch.stack(group) for group in zip(*batch))

if __name__ == "__main__":
    # --- File paths and windowing configuration ---
    list_file = 'list.txt'
    csv_file = 'eval.csv'
    seq_length = 24    # input window length (time steps)
    label_length = 1   # number of future steps used as the prediction target
    step = 1           # stride between consecutive windows

    # --- Model hyperparameters (must match the saved checkpoint) ---
    GAT_hidden_dim = 128
    num_sensors = 79
    group_size = 10
    GAT_output_dim = num_sensors
    predict_length = label_length
    # batch_size must stay 1: the per-sample logging below reads
    # batch_attack_labels[0].item().
    num_attention_cycles = 3
    batch_size = 1

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("device:", device)

    # Checkpoint produced by the training run (loading fails loudly if absent).
    model_path = 'model_GAT_attention_3_24_1.pth'

    print("发现保存的模型参数，正在加载...")
    model = CombinedModel(seq_length, num_sensors, group_size, GAT_output_dim,
                          GAT_hidden_dim, predict_length, num_attention_cycles)
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)

    print("载入数据中...")
    dataset = SensorDataset(list_file, csv_file, seq_length,
                            label_length=label_length, step=step, device=device)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
    print("载入数据完成...")

    # MSE between predicted and true sensor values serves as the anomaly score.
    criterion = nn.MSELoss(reduction='mean')

    # Per-window reconstruction losses and their ground-truth attack labels.
    # (Dead code removed: an unused loop counter, an unused threshold, and an
    # Adam optimizer that served no purpose in this eval-only script.)
    all_losses = []
    all_attack_labels = []

    model.eval()
    with torch.no_grad():
        for batch_idx, (batch_x, batch_y, batch_attack_labels) in enumerate(tqdm(dataloader)):
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            outputs = model(batch_x)  # [batch_size, predict_length, num_sensors]
            loss = criterion(outputs, batch_y)

            # Record this sample's loss and attack label (batch size is 1).
            all_losses.append(loss.item())
            all_attack_labels.append(batch_attack_labels[0].item())

    # Persist the evaluation trace for later threshold tuning / plotting.
    with open('GAT_Transformer_3_24_1.json', 'w') as f:
        json.dump({
            'all_losses': all_losses,
            'all_attack_labels': all_attack_labels
        }, f)
