# train_no_attention.py
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
import random
import time
import json
from tqdm import tqdm
from until import device, SOS_token, EOS_token, MAX_LENGTH, teacher_forcing_ratio, my_lr, epochs
from dataset import MyPairDataset
from get_dict import get_data
from encoder import MyEncoderGRU          # project-defined EncoderGRU
from decoder import MyDecoderGRU           # project-defined DecoderGRU

# ---------- 1. Data ----------
# Load vocab mappings for both languages plus the raw sentence pairs.
# word2index / index2word are the dicts in each direction; *_word_n is the vocab size.
english_word2index, english_index2word, english_word_n, \
french_word2index, french_index2word, french_word_n, my_pairs = get_data()

# ---------- 2. Training ----------
def train_iter(x, y,
               enc: MyEncoderGRU,
               dec: MyDecoderGRU,
               enc_opt: optim.Optimizer,
               dec_opt: optim.Optimizer,
               criterion: nn.Module):
    """
    Run one forward + backward pass on a single sample (batch size 1).

    Args:
        x: source-sentence index tensor, shape [1, src_len] (assumed from
           batch_size=1 DataLoader -- verify against MyPairDataset).
        y: target-sentence index tensor, shape [1, tgt_len].
        enc, dec: encoder / decoder GRU modules.
        enc_opt, dec_opt: optimizers for encoder and decoder respectively.
        criterion: loss over the decoder's per-step output (NLLLoss in train()).

    Returns:
        float: average loss per decoding step actually executed.
    """
    enc_opt.zero_grad()
    dec_opt.zero_grad()

    # 1. Encode: enc_out is unused here; only the final hidden state is
    #    passed on to seed the decoder.
    enc_hidden = enc.init_hidden().to(device)
    enc_out, enc_hidden = enc(x, enc_hidden)

    # 2. Decode, starting from SOS and the encoder's last hidden state.
    dec_input = torch.tensor([[SOS_token]], device=device)
    dec_hidden = enc_hidden

    target_len = y.shape[1]
    loss = 0
    steps = 0  # number of decoding steps actually executed

    # Randomly choose (per sample) whether to feed ground-truth tokens.
    use_teacher_forcing = random.random() < teacher_forcing_ratio

    for di in range(target_len):
        dec_out, dec_hidden = dec(dec_input, dec_hidden)   # dec_out: [1, vocab_size]
        target = y[0][di].view(1)                          # scalar target index
        loss += criterion(dec_out, target)
        steps += 1

        if use_teacher_forcing:
            dec_input = y[0][di].view(1, -1)               # feed the ground truth
        else:
            topv, topi = dec_out.topk(1)
            dec_input = topi.detach()                      # feed own prediction; cut graph
            if dec_input.item() == EOS_token:
                break                                      # model emitted end-of-sentence

    loss.backward()
    enc_opt.step()
    dec_opt.step()
    # BUG FIX: average over the steps actually decoded. The original divided by
    # target_len even when the free-running branch broke out early at EOS,
    # which underestimated the per-step loss for short predictions.
    return loss.item() / steps


def train():
    """
    Train the no-attention seq2seq model over all sentence pairs.

    Side effects:
        - saves encoder/decoder state dicts to ./model/ after every epoch
        - writes the sampled loss curve and elapsed seconds to ./result/noAtt_loss.json
    """
    # ROBUSTNESS FIX: create output directories up front; torch.save / open
    # raise FileNotFoundError if they do not exist.
    import os
    os.makedirs('./model', exist_ok=True)
    os.makedirs('./result', exist_ok=True)

    dataset = MyPairDataset(my_pairs)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True, drop_last=False)

    enc = MyEncoderGRU(vocab_size=english_word_n, hidden_size=256).to(device)
    dec = MyDecoderGRU(vocab_size=french_word_n, hidden_size=256).to(device)

    enc_opt = optim.Adam(enc.parameters(), lr=my_lr)
    dec_opt = optim.Adam(dec.parameters(), lr=my_lr)
    # NLLLoss: decoder is expected to emit log-probabilities (log_softmax output).
    criterion = nn.NLLLoss()

    plot_loss, total_loss, total_cnt = [], 0.0, 0
    start = time.time()

    for epoch in range(1, epochs + 1):
        epoch_loss = 0.0
        for x, y in tqdm(dataloader, desc=f"Epoch {epoch}"):
            x, y = x.to(device), y.to(device)
            loss = train_iter(x, y, enc, dec, enc_opt, dec_opt, criterion)
            epoch_loss += loss
            total_loss += loss
            total_cnt += 1

            # Sample the running (cumulative) average every 100 iterations
            # for the loss curve.
            if total_cnt % 100 == 0:
                plot_loss.append(total_loss / total_cnt)

        # Checkpoint both halves of the model after every epoch.
        torch.save(enc.state_dict(), f'./model/noAtt_enc_{epoch}.bin')
        torch.save(dec.state_dict(), f'./model/noAtt_dec_{epoch}.bin')
        print(f"Epoch {epoch} 平均损失: {epoch_loss / len(dataloader):.4f}")

    # Persist the training curve and total wall-clock time.
    with open('./result/noAtt_loss.json', 'w') as f:
        json.dump({'loss': plot_loss, 'time': int(time.time() - start)}, f)


# Script entry point: run the full training loop when executed directly.
if __name__ == '__main__':
    train()