from train_eval import *


def main():
    """Train a Seq2seqTransformer to reverse sequences, then show greedy-decode samples.

    All hyperparameters (BATCH_SIZE, D_MODEL, NUM_EPOCHS, ...) and helpers
    (ReverseDataset, pad_to_max, train_one_epoch, evaluate, the special token
    indices) come from ``train_eval`` via the star import at the top of the file.
    """
    # --- Data ---
    train_set = ReverseDataset(size=12000)
    valid_set = ReverseDataset(size=2000)
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, collate_fn=pad_to_max)
    valid_loader = DataLoader(valid_set, batch_size=BATCH_SIZE, shuffle=False, collate_fn=pad_to_max)

    # --- Model ---
    model = Seq2seqTransformer(
        src_vocab_size=SRC_VOCAB_SIZE,
        tgt_vocab_size=TGT_VOCAB_SIZE,
        d_model=D_MODEL,
        n_heads=N_HEADS,
        num_encoder_layers=NUM_ENCODER_LAYERS,
        num_decoder_layers=NUM_DECODER_LAYERS,
        dim_feedforward=DIM_FF,
        dropout=DROPOUT,
        # NOTE(review): "MAX_LAN" looks like a typo for MAX_LEN, but it must
        # match the constant's actual name in train_eval — confirm there.
        max_len=MAX_LAN
    ).to(DEVICE)

    # --- Optimizer ---
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

    # --- Training loop ---
    for epoch in range(1, NUM_EPOCHS + 1):
        train_ppl, train_acc = train_one_epoch(model, train_loader, optimizer)
        valid_ppl, valid_acc = evaluate(model, valid_loader)
        print(f"[Epoch {epoch:02d}] train_ppl={train_ppl:.3f}  train_acc={train_acc:.3f}  "
              f"valid_ppl={valid_ppl:.3f}  valid_acc={valid_acc:.3f}")

    # --- Qualitative check: greedy-decode a handful of fresh examples ---
    test_set = ReverseDataset(size=5)
    src_batch, _ = pad_to_max(test_set[:])
    src_batch = src_batch.to(DEVICE)
    # Fix: ensure dropout is disabled and no autograd graph is built while
    # decoding (the original called greedy_decode straight after training).
    model.eval()
    with torch.no_grad():
        pred = model.greedy_decode(src_batch, max_len=MAX_LAN)  # [B, L']

    def detok(seq):
        """Render a token-id list readably: skip PAD, stop at EOS, and map
        data tokens back to their digits by subtracting VOCAB_BASE."""
        out = []
        for t in seq:
            if t == PAD_IDX:
                continue
            if t == BOS_IDX:
                out.append("<BOS>")
                continue
            if t == EOS_IDX:
                out.append("<EOS>")
                break
            out.append(str(t - VOCAB_BASE))
        return " ".join(out)

    print("\n=== Greedy Decode Examples ===")
    # Fix: detok was re-defined inside this loop on every iteration; it is
    # now defined once above.
    for i in range(src_batch.size(0)):
        print(f"src: {detok(src_batch[i].tolist())}")
        print(f"hyp: {detok(pred[i].tolist())}\n")


if __name__ == "__main__":
    main()
