# coding: utf-8

import os
import torch
from build_gpt import GPT, GPTConfig
from build_dataset import BuildDataset
from torch.utils.data import DataLoader

# Build the model and, when multiple GPUs are present, wrap it with
# DataParallel for multi-GPU training.
model = GPT(GPTConfig())
device = "cuda" if torch.cuda.is_available() else "cpu"
# Derive device ids from the actual hardware instead of hard-coding [0, 1]:
# the hard-coded list crashes on CPU-only or single-GPU machines.
device_ids = list(range(torch.cuda.device_count()))
model = model.to(device)
if device == "cuda" and len(device_ids) > 1:
    # Only wrap when there is genuinely more than one GPU to split across.
    model = torch.nn.DataParallel(model, device_ids=device_ids)

# DataParallel shares the underlying parameters, so this count is the same
# whether or not the model was wrapped.
total_params = sum(p.numel() for p in model.parameters())
print(f"Number of parameters: {total_params / 1e6:.2f}M")

optimizer = torch.optim.AdamW(model.parameters(), lr=3e-4)
# Cosine LR decay over 1000 scheduler steps; train() steps it once per batch.
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=1000)

def train(model, epoch, optimizer, scheduler, train_loader, val_loader, device):
    """Run one training epoch and return the sum of per-batch (mean) losses.

    Note: ``val_loader`` is accepted for signature compatibility with the
    caller but is not used inside this function.
    """
    model.train()
    running_loss = 0.0
    for step, (inputs, targets) in enumerate(train_loader):
        # Move the batch to the training device.
        inputs, targets = inputs.to(device), targets.to(device)
        # Forward pass: the model returns (logits, loss) when targets are given.
        _, loss = model(inputs, targets=targets)
        # Under DataParallel the loss comes back as a per-GPU vector; reduce it
        # to a scalar, otherwise backward() raises
        # "grad can be implicitly created only for scalar outputs".
        if loss.dim() > 0:
            loss = loss.mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()  # per-batch learning-rate update
        running_loss += loss.item()
        if step % 10 == 0:
            print(f"Train Epoch: {epoch}, Batch: {step}, Loss: {loss.item():.6f}")
    return running_loss

def eval(model, val_loader, device):
    """Return the sum of per-batch (mean) losses over ``val_loader``.

    Runs in inference mode: dropout/batchnorm in eval state, no gradients.
    NOTE(review): the name shadows the builtin ``eval``; kept unchanged for
    caller compatibility.
    """
    model.eval()
    total = 0.0
    with torch.no_grad():
        for inputs, targets in val_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            _, loss = model(inputs, targets=targets)
            # DataParallel yields a per-device loss vector; reduce to a scalar.
            total += (loss.mean() if loss.dim() > 0 else loss).item()
    return total

# Load the JSONL dataset and carve out a 90/10 train/validation split.
dataset = BuildDataset(path="./datasets/my_dataset.jsonl")
splits = torch.utils.data.random_split(dataset, [0.9, 0.1])
train_dataset, val_dataset = splits
# Shuffle only the training data; validation order is irrelevant.
train_loader = DataLoader(train_dataset, batch_size=12, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=12, shuffle=False)

save_dir = "./checkpoints"
# Create the checkpoint directory up front so torch.save cannot fail with
# FileNotFoundError on the first epoch.
os.makedirs(save_dir, exist_ok=True)
# Lowest average validation loss seen so far; start at infinity so the first
# epoch always saves (the previous bound of 100 could skip saving entirely
# when early losses exceed 100).
best_val_loss = float("inf")
for epoch in range(2):
    train_loss = train(model, epoch, optimizer, scheduler, train_loader, val_loader, device)
    val_loss = eval(model, val_loader, device)

    # train()/eval() return summed per-batch losses; average over batch count.
    avg_val_loss = val_loss / len(val_loader)
    checkpoint = {
        "epoch": epoch,
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "scheduler_state_dict": scheduler.state_dict(),
        "val_loss": avg_val_loss,
    }
    if avg_val_loss < best_val_loss:
        best_val_loss = avg_val_loss
        # Persist only the best-performing checkpoint.
        torch.save(checkpoint, os.path.join(save_dir, "best_model.pt"))
    print(f"Epoch: {epoch}, Train Loss: {train_loss/len(train_loader):.6f}, Val Loss: {val_loss/len(val_loader):.6f}")