import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from sklearn.preprocessing import MinMaxScaler

class SensorDataset(Dataset):
    """Sliding-window dataset over multivariate sensor time series.

    Reads sensor (column) names from a list file, extracts those columns from
    a CSV, and yields consecutive ``(input_window, label_window)`` pairs of
    shape ``[seq_length, num_sensors]`` / ``[label_length, num_sensors]``.
    Also exposes per-sensor ``weights`` / ``biases`` tensors for use with a
    weighted loss (``w = 1/var``, ``b = -mean/var``).
    """

    def __init__(self, list_file, csv_file, seq_length, label_length=20, step=1, device='cpu'):
        """
        Args:
            list_file (str): Path to the list.txt file containing sensor headers.
            csv_file (str): Path to the CSV file containing sensor data.
            seq_length (int): Length of each input sequence (seq_x).
            label_length (int): Length of each label sequence (seq_y).
            step (int): Stride between start indices of consecutive windows.
            device (str or torch.device): Device on which tensors are created.

        Raises:
            ValueError: If any header from list_file is missing from the CSV.
        """
        # Read list.txt to obtain the sensor column names (one per line).
        with open(list_file, 'r') as f:
            self.headers = f.read().splitlines()

        # Load the CSV data.
        self.df = pd.read_csv(csv_file)

        # Fail fast if any requested sensor column is absent from the CSV.
        missing_headers = [header for header in self.headers if header not in self.df.columns]
        if missing_headers:
            raise ValueError(f"以下传感器在CSV中未找到: {missing_headers}")

        # Extract the sensor data: [num_samples, num_sensors]
        self.sensor_data = self.df[self.headers].values

        # Fill missing values with 0 (adjust the imputation strategy if needed).
        if np.isnan(self.sensor_data).any():
            self.sensor_data = np.nan_to_num(self.sensor_data, nan=0.0)

        self.num_sensors = len(self.headers)
        self.seq_length = seq_length
        self.label_length = label_length
        self.step = step
        self.device = device

        # Per-sensor statistics for the weighted loss. These attributes are
        # read by callers (e.g. the training script uses dataset.weights and
        # dataset.biases), so they must be computed here.
        self.means = np.mean(self.sensor_data, axis=0)      # [num_sensors]
        self.variances = np.var(self.sensor_data, axis=0)   # [num_sensors]
        _inv_var = 1.0 / (self.variances + 1e-8)            # 1e-8 guards against zero variance
        self.weights = torch.tensor(_inv_var, dtype=torch.float32, device=self.device)
        self.biases = torch.tensor(-self.means * _inv_var, dtype=torch.float32, device=self.device)

        # Build (input, label) windows. Kept as a plain Python list of tuples:
        # seq_x and seq_y have different lengths in general, so stacking them
        # with np.array() would create a ragged array, which recent NumPy
        # versions reject with a ValueError.
        self.sequences = []
        last_start = self.sensor_data.shape[0] - seq_length - label_length + 1
        for i in range(0, last_start, step):
            seq_x = self.sensor_data[i:i + seq_length]  # [seq_length, num_sensors]
            seq_y = self.sensor_data[i + seq_length:i + seq_length + label_length]  # [label_length, num_sensors]
            self.sequences.append((seq_x, seq_y))

        self.num_sequences = len(self.sequences)

    def __len__(self):
        # Number of (input, label) windows.
        return self.num_sequences

    def __getitem__(self, idx):
        # Return one (input, label) pair as float32 tensors on self.device.
        seq_x, seq_y = self.sequences[idx]  # seq_x: [seq_length, num_sensors], seq_y: [label_length, num_sensors]
        seq_x = torch.tensor(seq_x, dtype=torch.float32, device=self.device)
        seq_y = torch.tensor(seq_y, dtype=torch.float32, device=self.device)
        return seq_x, seq_y

def collate_fn(batch):
    """Stack a list of (seq_x, seq_y) pairs into batched tensors.

    Args:
        batch (list): List of (seq_x, seq_y) tuples.
    Returns:
        batch_x (torch.Tensor): [batch_size, seq_length, num_sensors]
        batch_y (torch.Tensor): [batch_size, label_length, num_sensors]
    """
    inputs = [pair[0] for pair in batch]
    labels = [pair[1] for pair in batch]
    # Stacking adds the leading batch dimension.
    return torch.stack(inputs), torch.stack(labels)

class WeightedMSELoss(nn.Module):
    """Per-sensor weighted mean squared error over multi-step predictions.

    Both predictions and targets are first affine-transformed per sensor
    (``x * w + b``), the squared error is taken element-wise, and the result
    is scaled once more by the per-sensor weights before averaging.
    """

    def __init__(self, weights, biases, label_length=20):
        """
        Args:
            weights: Per-sensor weights, shape (num_sensors,).
            biases: Per-sensor biases, shape (num_sensors,).
            label_length: Number of predicted time steps.
        """
        super().__init__()
        self.weights = weights          # [num_sensors]
        self.biases = biases            # [num_sensors]
        self.label_length = label_length
        # No reduction: keep the per-element losses so they can be re-weighted.
        self.mse = nn.MSELoss(reduction='none')

    def forward(self, predictions, targets):
        """
        Args:
            predictions: Model output, shape (batch_size, label_length, num_sensors).
            targets: Ground truth, shape (batch_size, label_length, num_sensors).
        Returns:
            Scalar weighted MSE loss.
        """
        # Apply the per-sensor affine transform to both sides. Note that the
        # bias terms cancel in the difference, so only the weights affect the
        # value: (p*w + b) - (t*w + b) == (p - t) * w.
        scaled_pred = predictions * self.weights + self.biases
        scaled_true = targets * self.weights + self.biases

        # Element-wise squared error: [batch_size, label_length, num_sensors].
        per_element = self.mse(scaled_pred, scaled_true)

        # Re-weight per sensor (broadcast over batch and time), then average
        # over all samples, time steps, and sensors.
        weighted = per_element * self.weights.view(1, 1, -1)
        return weighted.mean()

# Example usage
if __name__ == "__main__":
    # File paths and hyper-parameters.
    list_file = 'list.txt'
    csv_file = 'WADI_train.csv'
    seq_length = 100   # time steps per input sequence
    label_length = 20  # following time steps used as the label
    batch_size = 32
    step = 1           # window stride; adjust as needed
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Dataset and data loader.
    dataset = SensorDataset(list_file, csv_file, seq_length, label_length=label_length, step=step, device=device)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

    # Baseline model: a single linear layer mapping the flattened input
    # window to the flattened prediction window.
    model = nn.Linear(seq_length * dataset.num_sensors, label_length * dataset.num_sensors).to(device)

    # Per-sensor loss weights/biases (w = 1/var, b = -mean/var), computed
    # directly from the raw sensor data so this script does not depend on the
    # dataset exposing precomputed statistics. The 1e-8 term guards against
    # zero-variance sensors.
    means = np.mean(dataset.sensor_data, axis=0)        # [num_sensors]
    variances = np.var(dataset.sensor_data, axis=0)     # [num_sensors]
    weights = torch.tensor(1.0 / (variances + 1e-8), dtype=torch.float32, device=device)
    biases = torch.tensor(-means / (variances + 1e-8), dtype=torch.float32, device=device)
    criterion = WeightedMSELoss(weights, biases, label_length=label_length)

    # Optimizer.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # Training loop.
    num_epochs = 10
    for epoch in range(num_epochs):
        model.train()
        epoch_loss = 0.0
        for batch_idx, (batch_x, batch_y) in enumerate(dataloader):
            batch_x = batch_x.to(device)  # [batch_size, seq_length, num_sensors]
            batch_y = batch_y.to(device)  # [batch_size, label_length, num_sensors]

            # Flatten the input window for the linear layer.
            batch_x = batch_x.view(batch_x.size(0), -1)  # [batch_size, seq_length * num_sensors]

            # Forward pass.
            outputs = model(batch_x)  # [batch_size, label_length * num_sensors]

            # Reshape the output to match the label shape.
            outputs = outputs.view(batch_x.size(0), label_length, dataset.num_sensors)  # [batch_size, label_length, num_sensors]

            # Loss.
            loss = criterion(outputs, batch_y)
            epoch_loss += loss.item()

            # Backward pass and optimization step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        avg_loss = epoch_loss / len(dataloader)
        print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {avg_loss:.4f}')

    # Quick check on a single batch.
    model.eval()
    with torch.no_grad():
        for batch_idx, (batch_x, batch_y) in enumerate(dataloader):
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            batch_x = batch_x.view(batch_x.size(0), -1)  # [batch_size, seq_length * num_sensors]
            outputs = model(batch_x)  # [batch_size, label_length * num_sensors]
            outputs = outputs.view(batch_x.size(0), label_length, dataset.num_sensors)  # [batch_size, label_length, num_sensors]
            loss = criterion(outputs, batch_y)
            print(f'Batch {batch_idx + 1} Loss: {loss.item():.4f}')
            break  # example only; remove for a full evaluation