import torch
import torch.nn as nn
import torch.optim as optim
from dataclasses import dataclass
from typing import List, Tuple


@dataclass
class Config:
    """Hyper-parameters for the toy seq2seq translator."""

    d_model: int = 128       # embedding / hidden width
    n_head: int = 4          # attention heads (must divide d_model)
    n_enc: int = 2           # encoder layers
    n_dec: int = 2           # decoder layers
    ffn_dim: int = 256       # feed-forward inner width
    dropout: float = 0.1
    # Special-token ids. Defaults match the vocab layout built in
    # build_char_pair_dataset(); main() overwrites them from the actual vocabs.
    src_pad_id: int = 0
    tgt_pad_id: int = 0
    tgt_bos_id: int = 1
    tgt_eos_id: int = 2
    # Positional-embedding capacity — hard upper bound on sequence length.
    max_src_len: int = 64
    max_tgt_len: int = 64
    batch_size: int = 32


class Seq2SeqTransformer(nn.Module):
    """Encoder-decoder Transformer for character-level translation.

    Uses learned absolute positional embeddings (capacity cfg.max_src_len /
    cfg.max_tgt_len) and batch-first tensors throughout.
    """

    def __init__(self, cfg: Config, src_vocab_size: int, tgt_vocab_size: int):
        super().__init__()
        self.cfg = cfg

        # Token embeddings; padding rows are zero-initialized and get no gradient.
        self.src_emb = nn.Embedding(src_vocab_size, cfg.d_model, padding_idx=cfg.src_pad_id)
        self.tgt_emb = nn.Embedding(tgt_vocab_size, cfg.d_model, padding_idx=cfg.tgt_pad_id)
        # Learned positional embeddings; their size caps usable sequence length.
        self.src_pos = nn.Embedding(cfg.max_src_len, cfg.d_model)
        self.tgt_pos = nn.Embedding(cfg.max_tgt_len, cfg.d_model)

        self.transformer = nn.Transformer(
            d_model=cfg.d_model,
            nhead=cfg.n_head,
            num_encoder_layers=cfg.n_enc,
            num_decoder_layers=cfg.n_dec,
            dim_feedforward=cfg.ffn_dim,
            dropout=cfg.dropout,
            batch_first=True,
            norm_first=True,
        )

        self.ln_f = nn.LayerNorm(cfg.d_model)
        self.head = nn.Linear(cfg.d_model, tgt_vocab_size, bias=False)

    def _embed_src(self, src: torch.Tensor) -> torch.Tensor:
        # Token + position embeddings for the encoder. src: [b, s_src].
        pos = torch.arange(src.size(1), device=src.device)
        return self.src_emb(src) + self.src_pos(pos)[None, :, :]

    def _embed_tgt(self, tgt: torch.Tensor) -> torch.Tensor:
        # Token + position embeddings for the decoder. tgt: [b, s_tgt].
        pos = torch.arange(tgt.size(1), device=tgt.device)
        return self.tgt_emb(tgt) + self.tgt_pos(pos)[None, :, :]

    @staticmethod
    def _causal_mask(s_tgt: int, device: torch.device) -> torch.Tensor:
        # Boolean causal mask for the decoder; True means "blocked".
        return torch.triu(torch.ones(s_tgt, s_tgt, dtype=torch.bool, device=device), diagonal=1)

    def forward(
        self,
        src: torch.Tensor,
        tgt_inp: torch.Tensor,
        src_pad_mask: torch.Tensor,
        tgt_pad_mask: torch.Tensor,
    ) -> torch.Tensor:
        """Teacher-forced forward pass.

        src: [b, s_src] source token ids; tgt_inp: [b, s_tgt] BOS-prefixed
        target ids. The pad masks are True at padding positions.
        Returns logits of shape [b, s_tgt, tgt_vocab_size].
        """
        mem = self.transformer.encoder(self._embed_src(src), src_key_padding_mask=src_pad_mask)
        out = self.transformer.decoder(
            self._embed_tgt(tgt_inp),
            mem,
            tgt_mask=self._causal_mask(tgt_inp.size(1), tgt_inp.device),
            tgt_key_padding_mask=tgt_pad_mask,
            memory_key_padding_mask=src_pad_mask,
        )
        return self.head(self.ln_f(out))

    @torch.no_grad()
    def generate(self, src: torch.Tensor, src_pad_mask: torch.Tensor, max_new_tokens: int) -> torch.Tensor:
        """Greedy decoding from BOS; returns [b, <=max_tgt_len+1] ids incl. BOS.

        Stops early only when EVERY sequence in the batch emits EOS on the
        same step (simplification — finished sequences keep generating).
        """
        b = src.size(0)
        ys = torch.full((b, 1), self.cfg.tgt_bos_id, dtype=torch.long, device=src.device)
        # Encode the source once and reuse the memory for every decode step.
        mem = self.transformer.encoder(self._embed_src(src), src_key_padding_mask=src_pad_mask)
        # BUGFIX: clamp the number of steps so the decoder never embeds more
        # than max_tgt_len positions (tgt_pos would raise IndexError otherwise).
        steps = min(max_new_tokens, self.cfg.max_tgt_len)
        for _ in range(steps):
            out = self.transformer.decoder(
                self._embed_tgt(ys),
                mem,
                tgt_mask=self._causal_mask(ys.size(1), ys.device),
                tgt_key_padding_mask=None,
                memory_key_padding_mask=src_pad_mask,
            )
            logits = self.head(self.ln_f(out))
            next_tok = torch.argmax(logits[:, -1, :], dim=-1, keepdim=True)
            ys = torch.cat([ys, next_tok], dim=1)
            if torch.all(next_tok.squeeze(-1) == self.cfg.tgt_eos_id):
                break
        return ys


def build_char_pair_dataset() -> Tuple[List[Tuple[str, str]], dict, dict, dict, dict]:
    """Build the tiny zh->en parallel corpus and its character vocabularies.

    Returns (pairs, src_stoi, src_itos, tgt_stoi, tgt_itos). The source vocab
    reserves index 0 for <pad>; the target vocab reserves 0/1/2 for
    <pad>/<bos>/<eos>.
    """
    pairs = [
        ("我爱学习。", "I love learning."),
        ("注意力很重要。", "Attention is important."),
        ("你好，世界！", "Hello, world!"),
        ("小模型也能训练。", "Tiny models can be trained."),
        ("给出一个例子。", "Provide an example."),
    ]

    def _vocab(texts: List[str], specials: List[str]) -> List[str]:
        # Specials come first so their ids are fixed and small.
        return specials + sorted(set("".join(texts)))

    src_vocab = _vocab([s for s, _ in pairs], ["<pad>"])
    tgt_vocab = _vocab([t for _, t in pairs], ["<pad>", "<bos>", "<eos>"])

    # id -> char directly from position; char -> id is the inverse mapping.
    src_itos = dict(enumerate(src_vocab))
    tgt_itos = dict(enumerate(tgt_vocab))
    src_stoi = {ch: i for i, ch in src_itos.items()}
    tgt_stoi = {ch: i for i, ch in tgt_itos.items()}

    return pairs, src_stoi, src_itos, tgt_stoi, tgt_itos


def encode_pairs(
    pairs: List[Tuple[str, str]],
    src_stoi: dict,
    tgt_stoi: dict,
    cfg: Config,
) -> List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
    """Convert (src, tgt) string pairs into id tensors for teacher forcing.

    Returns a list of (src_ids, tgt_inp, tgt_out) triples where
    tgt_inp = [BOS] + target ids and tgt_out = target ids + [EOS], i.e. the
    decoder predicts tgt_out from tgt_inp shifted by one position.
    (BUGFIX: the return annotation previously claimed 2-tuples.)
    """
    # BOS/EOS are loop-invariant; build them once.
    bos = torch.tensor([cfg.tgt_bos_id], dtype=torch.long)
    eos = torch.tensor([cfg.tgt_eos_id], dtype=torch.long)
    res = []
    for s, t in pairs:
        src_ids = torch.tensor([src_stoi[c] for c in s], dtype=torch.long)
        tgt_ids = torch.tensor([tgt_stoi[c] for c in t], dtype=torch.long)
        res.append((src_ids, torch.cat([bos, tgt_ids], dim=0), torch.cat([tgt_ids, eos], dim=0)))
    return res


def pad_batch(batch: List[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]], cfg: Config):
    """Right-pad a list of (src, tgt_inp, tgt_out) examples into stacked tensors.

    Sequences are truncated to cfg.max_src_len / cfg.max_tgt_len first.
    Returns (src, tgt_inp, tgt_out, src_pad_mask, tgt_pad_mask); both masks
    are True exactly at padding positions.
    """
    src_len = min(cfg.max_src_len, max(ex[0].size(0) for ex in batch))
    tgt_len = min(cfg.max_tgt_len, max(ex[1].size(0) for ex in batch))

    def _fit(seq: torch.Tensor, length: int, pad_id: int) -> torch.Tensor:
        # Truncate, then right-pad with pad_id to exactly `length` tokens.
        seq = seq[:length]
        fill = torch.full((length - seq.size(0),), pad_id, dtype=torch.long)
        return torch.cat([seq, fill])

    src_batch = torch.stack([_fit(ex[0], src_len, cfg.src_pad_id) for ex in batch])
    tgt_inp_batch = torch.stack([_fit(ex[1], tgt_len, cfg.tgt_pad_id) for ex in batch])
    tgt_out_batch = torch.stack([_fit(ex[2], tgt_len, cfg.tgt_pad_id) for ex in batch])

    src_pad_mask = src_batch.eq(cfg.src_pad_id)
    tgt_pad_mask = tgt_inp_batch.eq(cfg.tgt_pad_id)
    return src_batch, tgt_inp_batch, tgt_out_batch, src_pad_mask, tgt_pad_mask


def main():
    """Train the toy translator on the built-in pairs, then greedy-decode one sample."""
    import random

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pairs, src_stoi, src_itos, tgt_stoi, tgt_itos = build_char_pair_dataset()

    cfg = Config()
    # Sync the special-token ids with the vocabularies we actually built.
    cfg.src_pad_id = src_stoi["<pad>"]
    cfg.tgt_pad_id = tgt_stoi["<pad>"]
    cfg.tgt_bos_id = tgt_stoi["<bos>"]
    cfg.tgt_eos_id = tgt_stoi["<eos>"]

    data = encode_pairs(pairs, src_stoi, tgt_stoi, cfg)

    model = Seq2SeqTransformer(cfg, src_vocab_size=len(src_stoi), tgt_vocab_size=len(tgt_stoi)).to(device)
    opt = optim.AdamW(model.parameters(), lr=3e-4)
    # Padding positions in tgt_out contribute nothing to the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=cfg.tgt_pad_id)

    total_steps = 300
    model.train()
    for step in range(1, total_steps + 1):
        sampled = random.sample(data, k=min(cfg.batch_size, len(data)))
        tensors = pad_batch(sampled, cfg)
        src, tgt_inp, tgt_out, src_mask, tgt_mask = (t.to(device) for t in tensors)

        logits = model(src, tgt_inp, src_mask, tgt_mask)
        loss = criterion(logits.view(-1, logits.size(-1)), tgt_out.view(-1))
        opt.zero_grad(set_to_none=True)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        opt.step()

        if step % 50 == 0:
            print(f"step {step:4d} | loss {loss.item():.4f}")

    # Quick generation demo on one training sentence.
    model.eval()
    sample = "我爱学习。"
    src_ids = torch.tensor([[src_stoi[c] for c in sample]], dtype=torch.long, device=device)
    pad_mask = src_ids.eq(cfg.src_pad_id)
    generated = model.generate(src_ids, pad_mask, max_new_tokens=64)[0].tolist()
    # Drop the leading BOS, then cut at the first EOS if one was produced.
    generated = generated[1:]
    if cfg.tgt_eos_id in generated:
        generated = generated[: generated.index(cfg.tgt_eos_id)]
    decoded = "".join(tgt_itos[i] for i in generated)
    print("\n=== Translation ===\nSRC: ", sample)
    print("TGT: ", decoded)


if __name__ == "__main__":
    main()