import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import trange, tqdm
import torch.nn.functional as F
from Datasets import LyricsDataset
from Models import TransformerGenerator
from torch.utils.tensorboard import SummaryWriter

# ---------------- Hyperparameters ----------------
lr = 1e-4
batch_size = 64
epochs = 150
seq_len = 48

writer = SummaryWriter(log_dir="runs/lyrics")
dataset = LyricsDataset(seq_len=seq_len)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = TransformerGenerator(vocab_size=len(dataset.word2index), nhead=8, num_layers=8, d_model=512).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# PAD positions contribute nothing to the loss.
criterion = nn.CrossEntropyLoss(ignore_index=dataset.word2index["<PAD>"])

# 80/20 train/test split.
# NOTE(review): random_split is unseeded, so the split differs between runs —
# pass generator=torch.Generator().manual_seed(...) if reproducibility matters.
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

# shuffle=True: reshuffle training samples every epoch (was missing, which
# feeds batches in a fixed order). The unused full-dataset loader was removed.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size)

min_loss = float('inf')  # best (lowest) test loss seen so far; updated by verify()
@torch.no_grad()  # no gradients needed for evaluation: faster, lower memory use
def verify(epoch):
    """Evaluate on the test split, log metrics, and checkpoint on improvement.

    Args:
        epoch: zero-based epoch index, used as the TensorBoard step
            (printed as ``epoch + 1``).

    Side effects:
        Updates the global ``min_loss``; writes ``Loss/Test`` and
        ``Accuracy/Test`` scalars; saves the model to ``lyrics.pt`` whenever
        the average test loss improves. Leaves the model back in train mode.
    """
    global min_loss
    model.eval()  # evaluation mode (disables dropout, etc.)

    total_loss = 0
    total_correct = 0
    total_tokens = 0
    vocab_size = len(dataset.word2index)
    # Hoisted out of the batch loop: the PAD index never changes per call.
    pad_idx = dataset.word2index.get("<PAD>", None)

    for x, y in test_loader:
        x = x.to(device)
        y = y.to(device)

        output = model(x)  # [batch_size, seq_len, vocab_size]

        # Next-token loss: output position t predicts token t+1 of y.
        loss = criterion(output[:, :-1].reshape(-1, vocab_size),
                         y[:, 1:].reshape(-1))
        total_loss += loss.item()

        # ===== Accuracy =====
        pred = output[:, :-1].argmax(dim=-1)  # [batch_size, seq_len-1]
        target = y[:, 1:]                     # [batch_size, seq_len-1]

        # Exclude PAD positions from the accuracy count, matching the loss.
        if pad_idx is not None:
            mask = target != pad_idx
            correct = ((pred == target) & mask).sum().item()
            total = mask.sum().item()
        else:
            correct = (pred == target).sum().item()
            total = torch.numel(target)

        total_correct += correct
        total_tokens += total

    avg_loss = total_loss / len(test_loader)
    accuracy = total_correct / total_tokens
    if avg_loss < min_loss:
        min_loss = avg_loss
        # NOTE(review): this pickles the entire nn.Module; saving
        # model.state_dict() is more portable, but switching would change the
        # checkpoint format any downstream loader expects — confirm first.
        torch.save(model, "lyrics.pt")

    writer.add_scalar("Loss/Test", avg_loss, epoch)
    writer.add_scalar("Accuracy/Test", accuracy, epoch)

    print(f"[Verify] Epoch {epoch + 1}, Loss: {avg_loss:.4f}, Accuracy: {accuracy:.4f}")

    model.train()  # restore training mode for the caller


def train():
    """Run the full training loop: one pass over train_loader per epoch,
    logging train loss/accuracy to TensorBoard and validating via verify().
    """
    # Loop invariants hoisted out of the per-batch loop.
    vocab_size = len(dataset.word2index)
    pad_idx = dataset.word2index.get("<PAD>", None)

    for epoch in trange(epochs, desc="Epoch", leave=False):
        total_loss = 0
        total_correct = 0
        total_tokens = 0

        for x, y in tqdm(train_loader, desc="Batch", leave=False):
            x = x.to(device)
            y = y.to(device)
            optimizer.zero_grad()

            output = model(x)  # [batch_size, seq_len, vocab_size]

            # Next-token loss: output position t predicts token t+1 of y.
            loss = criterion(output[:, :-1].reshape(-1, vocab_size),
                             y[:, 1:].reshape(-1))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

            # ===== Accuracy =====
            pred = output[:, :-1].argmax(dim=-1)   # [batch_size, seq_len-1]
            target = y[:, 1:]                      # [batch_size, seq_len-1]

            # Mask PAD positions so train accuracy is computed the same way
            # as test accuracy in verify() (previously PAD tokens were
            # counted here, inflating/deflating the train metric).
            if pad_idx is not None:
                mask = target != pad_idx
                correct = ((pred == target) & mask).sum().item()
                total = mask.sum().item()
            else:
                correct = (pred == target).sum().item()
                total = torch.numel(target)

            total_correct += correct
            total_tokens += total

        avg_loss = total_loss / len(train_loader)
        accuracy = total_correct / total_tokens

        writer.add_scalar("Loss/Train", avg_loss, epoch)
        writer.add_scalar("Accuracy/Train", accuracy, epoch)
        verify(epoch)

        print(f"Epoch {epoch + 1}/{epochs}, Loss: {avg_loss:.4f}, Accuracy: {accuracy:.4f}")


# Guard the entry point so importing this module does not start training.
if __name__ == "__main__":
    train()

