import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import matplotlib.pyplot as plt
from tqdm import tqdm  # 进度条
# from torchsummary import summary

from wholeModel import CombinedModel
from dataProcess import SensorDataset

if __name__ == "__main__":
    # ---- Data / windowing configuration ----
    list_file = 'list.txt'
    csv_file = 'train.csv'
    seq_length = 24    # length of the historical window fed to the model
    label_length = 1   # number of future time steps used as the label
    step = 1           # sliding-window stride over the series

    # ---- Model / training hyper-parameters ----
    GAT_hidden_dim = 128
    num_sensors = 79
    group_size = 10
    GAT_output_dim = num_sensors
    predict_length = label_length
    num_attention_cycles = 3
    batch_size = 32

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("device:", device)

    # Build the model once; if a saved checkpoint exists, restore its weights.
    # (Previously the constructor call was duplicated in both branches.)
    model_path = "model_GAT_attention_3_24_1.pth"
    model = CombinedModel(seq_length, num_sensors, group_size, GAT_output_dim,
                          GAT_hidden_dim, predict_length, num_attention_cycles)
    if os.path.exists(model_path):
        print("发现保存的模型参数，正在加载...")
        # map_location lets a GPU-saved checkpoint load on a CPU-only machine.
        model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)

    print("载入数据中...")
    # Dataset yields (x, y) windows: x is seq_length steps, y the next label_length.
    dataset = SensorDataset(list_file, csv_file, seq_length,
                            label_length=label_length, step=step, device=device)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    print("载入数据完成...")

    # Mean-squared-error regression loss over all sensors and predicted steps.
    criterion = nn.MSELoss(reduction='mean')
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    losses = []  # per-epoch average training loss, for the curve below

    num_epochs = 3
    for epoch in range(num_epochs):
        model.train()
        epoch_loss = 0.0
        print(f"Epoch {epoch + 1}/{num_epochs}")

        # tqdm wraps the dataloader to show per-batch progress.
        for batch_x, batch_y in tqdm(dataloader, desc="Training", leave=False):
            batch_x = batch_x.to(device)  # [batch_size, seq_length, num_sensors]
            batch_y = batch_y.to(device)  # [batch_size, label_length, num_sensors]

            optimizer.zero_grad()
            outputs = model(batch_x)      # [batch_size, predict_length, num_sensors]
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

        avg_loss = epoch_loss / len(dataloader)
        losses.append(avg_loss)
        print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {avg_loss:.4f}')

        # Checkpoint after every epoch so an interrupted run loses at most one epoch.
        torch.save(model.state_dict(), model_path)
        print(f"模型参数已保存到 {model_path}")

    # Plot and save the training-loss curve.
    plt.figure(figsize=(10, 6))
    plt.plot(range(1, num_epochs + 1), losses, marker='o', linestyle='-', color='b')
    # English title: matplotlib's default fonts cannot render CJK glyphs,
    # so the previous Chinese title rendered as empty boxes in the PNG.
    plt.title('Training loss per epoch')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid(True)
    plt.savefig('loss_curve.png')
    plt.close()  # release the figure's memory

    print("损失曲线已保存为 'loss_curve.png'")