#!/usr/bin/env python3
"""
train_text_decoder_with_tb_improved.py

改动说明：
- 去除所有 try/except，以便异常直接暴露，逻辑更清晰。
- 变量名更易懂，部分使用 Transformer 风格命名（query/key/memory）。
- 保持原有训练/评估/日志功能。

用法示例同原脚本。
"""
import argparse
import json
import math
import os
import random
from typing import Optional

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter

# Optional HF tokenizer (note: this import raises if transformers is not installed)
from transformers import AutoTokenizer

# ----------------------------
# Utilities & dataset loader
# ----------------------------
def load_tokens_file(tokens_path):
    """Load token-id sequences from disk.

    Supported formats: .pt/.pth (tensor, list, or dict with "input_ids"),
    .npy/.npz, .jsonl (one record per line, either a bare list or a dict
    with a "tokens" key), .json (list of lists), or whitespace-separated
    plain text (one sequence per line).

    Returns a list of lists of ints.
    """
    import torch as _torch
    import numpy as _np

    if tokens_path.endswith((".pt", ".pth")):
        loaded = _torch.load(tokens_path, map_location="cpu")
        if isinstance(loaded, _torch.Tensor):
            print(f"[INFO] Detected tensor tokens file with shape {tuple(loaded.shape)}, converting to list-of-lists...")
            sequences = loaded.tolist()
        elif isinstance(loaded, list):
            if all(isinstance(item, list) for item in loaded):
                sequences = loaded
            elif all(isinstance(item, _torch.Tensor) for item in loaded):
                sequences = [item.tolist() for item in loaded]
            else:
                raise ValueError("Loaded .pt but inner elements are not lists or tensors.")
        elif isinstance(loaded, dict) and "input_ids" in loaded:
            inner = loaded["input_ids"]
            if isinstance(inner, _torch.Tensor):
                sequences = inner.tolist()
            else:
                raise ValueError("Unsupported 'input_ids' type inside .pt")
        else:
            raise ValueError(f"Unsupported .pt data type: {type(loaded)}")

    elif tokens_path.endswith((".npy", ".npz")):
        sequences = _np.load(tokens_path, allow_pickle=True).tolist()
    elif tokens_path.endswith(".jsonl"):
        sequences = []
        with open(tokens_path, "r", encoding="utf-8") as fh:
            for raw in fh:
                record = json.loads(raw)
                # dict records carry ids under "tokens"; bare lists are used as-is
                source = record["tokens"] if "tokens" in record else record
                sequences.append([int(t) for t in source])
    elif tokens_path.endswith(".json"):
        with open(tokens_path, "r", encoding="utf-8") as fh:
            parsed = json.load(fh)
        if isinstance(parsed, list) and isinstance(parsed[0], list):
            sequences = [[int(t) for t in row] for row in parsed]
        else:
            raise ValueError("Unsupported .json tokens schema.")
    else:
        # Plain text fallback: blank lines become empty sequences.
        with open(tokens_path, "r", encoding="utf-8") as fh:
            sequences = [[int(t) for t in line.strip().split()] for line in fh]
    print(f"[INFO] Loaded tokens file ({len(sequences)} entries).")
    return sequences


def load_clip_embs(path: str) -> np.ndarray:
    """Load CLIP text embeddings as a numpy array.

    Accepts .npy/.npz, .pt/.pth (tensor or array-like), or gzipped .npy.
    Raises ValueError for any other extension.
    """
    if path.endswith(".npy") or path.endswith(".npz"):
        return np.asarray(np.load(path, allow_pickle=True))
    if path.endswith(".pt") or path.endswith(".pth"):
        loaded = torch.load(path, map_location="cpu")
        if isinstance(loaded, torch.Tensor):
            return loaded.cpu().numpy()
        return np.asarray(loaded)
    if path.endswith(".npy.gz"):
        import gzip
        with gzip.open(path, "rb") as fh:
            return np.load(fh)
    raise ValueError("Unsupported clip_embs_path format. Use .npy or .pt")


class CaptionDataset(Dataset):
    """Pairs token-id caption sequences with their CLIP text embeddings.

    Each item is a dict:
      - "input_ids": long tensor of token ids
      - "clip_features": float tensor of the clip embedding
    """
    def __init__(self, tokens_path: str, clip_embs_path: str):
        self.token_sequences = load_tokens_file(tokens_path)
        self.clip_embeddings = load_clip_embs(clip_embs_path)

        len_tokens = len(self.token_sequences)
        len_embs = len(self.clip_embeddings)
        if len_tokens != len_embs:
            # Misaligned inputs: keep the common prefix rather than failing.
            min_len = min(len_tokens, len_embs)
            print(f"[WARN] tokens ({len_tokens}) and clip_embs ({len_embs}) not aligned, truncating to {min_len} samples.")
            self.token_sequences = self.token_sequences[:min_len]
            self.clip_embeddings = self.clip_embeddings[:min_len]
        else:
            print(f"[INFO] tokens and clip_embs aligned ({len_tokens} samples).")
        # Dataset statistics; empty sequences contribute -1 to min/max.
        flat_max = max((max(seq) if len(seq) > 0 else -1) for seq in self.token_sequences)
        flat_min = min((min(seq) if len(seq) > 0 else -1) for seq in self.token_sequences)
        lens = [len(s) for s in self.token_sequences]
        max_len = max(lens) if lens else 0
        mean_len = sum(lens) / len(lens) if lens else 0
        # BUG FIX: the max length was hard-coded as "60,916" in this log line;
        # report the actual maximum sequence length instead.
        print(f"[INFO] token id range: min={flat_min}, max={flat_max}; token lengths: max={max_len}, mean={mean_len:.2f}")
        self._max_token_id = int(flat_max)
        self._min_token_id = int(flat_min)

        self.clip_embeddings = np.asarray(self.clip_embeddings)
        if self.clip_embeddings.ndim == 1:
            # A single flat embedding becomes a (1, dim) matrix.
            self.clip_embeddings = np.expand_dims(self.clip_embeddings, 0)
        self.n = len(self.token_sequences)

    def __len__(self):
        return self.n

    def __getitem__(self, idx):
        # NOTE(review): out-of-range indices are silently clamped to the last
        # sample instead of raising IndexError -- presumably to tolerate
        # sampler overshoot; confirm this is intended.
        if idx >= self.n:
            idx = self.n - 1
        toks = torch.tensor(self.token_sequences[idx], dtype=torch.long)
        emb = torch.tensor(self.clip_embeddings[idx], dtype=torch.float32)
        return {"input_ids": toks, "clip_features": emb}


def collate_fn(batch, pad_token_id: int, max_len: Optional[int] = None, bos_token_id: Optional[int] = None):
    """Pad a list of dataset items into fixed-size batch tensors.

    Returns a dict with:
      - decoder_input_ids: (B, L)  decoder input ([BOS] + tokens[:-1])
      - target_ids: (B, L)         original tokens (training targets)
      - attention_mask: (B, L)     bool, True on real (non-pad) positions
      - clip_features: (B, clip_dim)
      - lengths: (B,)              original, untruncated sequence lengths
    """
    seqs = [item["input_ids"] for item in batch]
    clip_feats = torch.stack([item["clip_features"] for item in batch], dim=0)
    seq_lens = [s.numel() for s in seqs]
    longest = max(seq_lens)
    L = longest if max_len is None else min(longest, max_len)
    B = len(seqs)

    padded = seqs[0].new_full((B, L), pad_token_id)
    attn = seqs[0].new_zeros((B, L), dtype=torch.bool)
    for row, seq in enumerate(seqs):
        n = min(seq.numel(), L)
        padded[row, :n] = seq[:n]
        attn[row, :n] = 1

    # Teacher forcing: shift targets right and prepend BOS (pad if no BOS given).
    bos = pad_token_id if bos_token_id is None else bos_token_id
    targets = padded.clone()
    dec_in = padded.new_full((B, L), pad_token_id)
    dec_in[:, 0] = bos
    if L > 1:
        dec_in[:, 1:] = padded[:, :-1]

    return {
        "decoder_input_ids": dec_in,              # (B, L)
        "target_ids": targets,                    # (B, L)
        "attention_mask": attn,                   # (B, L) bool
        "clip_features": clip_feats,              # (B, clip_dim)
        "lengths": torch.tensor(seq_lens, dtype=torch.long)
    }


# ----------------------------
# Model
# ----------------------------
class ClipConditionedTransformerDecoder(nn.Module):
    """Transformer decoder that generates token sequences conditioned on a
    CLIP embedding.

    The CLIP embedding is projected to d_model and used as a length-1
    memory (key/value) sequence for the decoder's cross-attention.
    """
    def __init__(
        self,
        vocab_size: int,
        d_model: int = 256,
        nhead: int = 4,
        num_layers: int = 3,
        dim_feedforward: int = 1024,
        clip_emb_dim: int = 512,
        max_len: int = 64,
        dropout: float = 0.3,
        pad_token_id: int = 0,
    ):
        super().__init__()
        self.d_model = d_model
        self.vocab_size = vocab_size
        self.pad_token_id = pad_token_id
        self.token_emb = nn.Embedding(vocab_size, d_model, padding_idx=pad_token_id)
        # Learned absolute positions; sequences longer than max_len cannot be embedded.
        self.pos_emb = nn.Embedding(max_len, d_model)
        self.dropout = nn.Dropout(dropout)

        decoder_layer = nn.TransformerDecoderLayer(
            d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout, activation="gelu")
        self.decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)

        # Project the clip embedding into the decoder memory space (key/value).
        self.clip_to_mem = nn.Linear(clip_emb_dim, d_model)
        self.output_fc = nn.Linear(d_model, vocab_size)

        self._reset_parameters()

    def _reset_parameters(self):
        """Xavier-init all weight matrices, then restore the zero pad row."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        # BUG FIX: the xavier init above overwrote the padding row that
        # nn.Embedding(padding_idx=...) zeroes by contract; re-zero it so the
        # pad token contributes nothing to the input representation.
        with torch.no_grad():
            self.token_emb.weight[self.pad_token_id].fill_(0.0)

    def forward(
        self,
        decoder_input_ids: torch.LongTensor,   # (B, T) token ids of the query sequence
        clip_features: torch.FloatTensor,      # (B, clip_dim) source of the memory
        causal_mask: Optional[torch.Tensor] = None,
        key_padding_mask: Optional[torch.Tensor] = None,
    ):
        """Run the decoder and return vocabulary logits of shape (B, T, V).

        Transformer-view naming:
          - query_seq: decoder token embeddings (the query)
          - clip_memory: projected clip embedding, the decoder memory (key/value)
        """
        B, T = decoder_input_ids.shape
        device = decoder_input_ids.device
        pos = torch.arange(0, T, device=device).unsqueeze(0).expand(B, T)
        token_embeddings = self.token_emb(decoder_input_ids) * math.sqrt(self.d_model)   # (B, T, d)
        pos_embeddings = self.pos_emb(pos)   # (B, T, d)
        query_seq = self.dropout(token_embeddings + pos_embeddings).transpose(0, 1)   # (T, B, d) as Transformer query

        clip_memory = self.clip_to_mem(clip_features)    # (B, d)
        # TransformerDecoder expects memory shape (S, B, E) where S is the source
        # sequence length; the clip memory becomes a length-1 source.
        clip_memory = clip_memory.unsqueeze(0)  # (1, B, d)

        decoder_output = self.decoder(query_seq, clip_memory, tgt_mask=causal_mask, tgt_key_padding_mask=key_padding_mask)
        decoder_output = decoder_output.transpose(0, 1)   # (B, T, d)
        logits = self.output_fc(decoder_output)  # (B, T, V)
        return logits

    @staticmethod
    def generate_causal_mask(sz: int, device=None):
        """Return a float (sz, sz) mask: 0 on/below the diagonal, -inf above.

        Simplified from the original double masked_fill; the result is identical.
        """
        upper = torch.triu(torch.ones(sz, sz, device=device), diagonal=1).bool()
        return torch.zeros(sz, sz, device=device).masked_fill(upper, float('-inf'))


# ----------------------------
# Helpers: loss, decoding, accuracy
# ----------------------------
def compute_loss(logits: torch.Tensor, target: torch.LongTensor, pad_token_id: int):
    """Mean cross-entropy per non-pad target token.

    Returns (loss_per_token, nonpad_token_count); pad positions are
    excluded via ignore_index.
    """
    vocab = logits.size(-1)
    criterion = nn.CrossEntropyLoss(ignore_index=pad_token_id, reduction="sum")
    flat_targets = target.reshape(-1)
    summed = criterion(logits.reshape(-1, vocab), flat_targets)
    n_valid = int((flat_targets != pad_token_id).sum())
    return summed / max(1, n_valid), n_valid


def compute_accuracy(logits: torch.Tensor, target: torch.LongTensor, pad_token_id: int):
    """Greedy-prediction accuracy against targets, ignoring pad positions.

    Returns (token_accuracy_float, seq_accuracy_float, nonpad_count_int).
    A sequence counts as correct only if every non-pad token matches;
    all-pad rows are excluded from the sequence denominator.
    """
    predictions = logits.argmax(dim=-1)   # (B, T)
    valid = target != pad_token_id        # (B, T)
    n_valid = int(valid.sum().item())

    if n_valid > 0:
        token_acc = float(((predictions == target) & valid).sum().item()) / n_valid
    else:
        token_acc = 0.0

    exact, counted = 0, 0
    for pred_row, tgt_row, valid_row in zip(predictions, target, valid):
        if not bool(valid_row.any()):
            continue
        counted += 1
        exact += int(torch.equal(pred_row[valid_row], tgt_row[valid_row]))

    seq_acc = exact / counted if counted > 0 else 0.0
    return float(token_acc), float(seq_acc), n_valid


def greedy_decode(model: nn.Module, clip_feature: torch.Tensor, max_len: int = 32, device="cpu",
                  start_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, tokenizer=None):
    """Autoregressive greedy decoding for a single sample.

    Starts from start_token_id (falling back to tokenizer BOS, then EOS,
    then the model's pad id) and stops at eos_token_id or after max_len
    steps. Returns (generated_ids_list, decoded_text_or_None); the EOS
    token, if hit, is included in the id list.
    """
    model.eval()
    feats = clip_feature.unsqueeze(0) if clip_feature.dim() == 1 else clip_feature
    feats = feats.to(device)

    if start_token_id is None:
        bos = getattr(tokenizer, "bos_token_id", None) if tokenizer is not None else None
        eos = getattr(tokenizer, "eos_token_id", None) if tokenizer is not None else None
        if bos is not None:
            start_token_id = bos
        elif eos is not None:
            start_token_id = eos
        else:
            start_token_id = getattr(model, "pad_token_id", 0)

    prefix = torch.full((1, 1), start_token_id, dtype=torch.long, device=device)
    out_ids = []

    for _ in range(max_len):
        mask = model.generate_causal_mask(prefix.size(1), device=device)
        logits = model(prefix, feats, causal_mask=mask, key_padding_mask=(prefix == model.pad_token_id))
        chosen = torch.argmax(logits[:, -1, :], dim=-1)   # (1,)
        tok = chosen.item()
        out_ids.append(tok)
        if eos_token_id is not None and tok == eos_token_id:
            break
        prefix = torch.cat([prefix, chosen.unsqueeze(1)], dim=1)

    if tokenizer is None:
        return out_ids, " ".join(str(t) for t in out_ids)

    # Strip special tokens from both ends before decoding to text.
    special = (getattr(tokenizer, "eos_token_id", None), getattr(tokenizer, "pad_token_id", None))
    trimmed = list(out_ids)
    while trimmed and trimmed[0] in special:
        trimmed.pop(0)
    while trimmed and trimmed[-1] in special:
        trimmed.pop()
    text = tokenizer.decode(trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=True) if trimmed else ""
    return out_ids, text


# ----------------------------
# Training loop (with augmentations and TB logging)
# ----------------------------
def train_epoch(model: nn.Module, dataloader: DataLoader, optimizer, device, args, writer: SummaryWriter, global_step: int):
    """Run one training epoch.

    Applies training-time augmentations (gaussian noise on clip features,
    token dropout on the decoder input), validates token ids against the
    embedding table size, and logs per-batch metrics to TensorBoard.

    Returns (avg_loss_per_token, global_step, avg_token_acc, avg_seq_acc).
    """
    model.train()
    total_loss = 0.0
    total_tokens = 0

    token_correct_sum = 0
    nonpad_total = 0
    seq_correct_sum = 0
    seq_denom_total = 0

    pbar = tqdm(enumerate(dataloader), desc="train", total=len(dataloader), leave=False)
    for batch_idx, batch in pbar:
        decoder_input_ids = batch["decoder_input_ids"].to(device)
        target_ids = batch["target_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)
        clip_features = batch["clip_features"].to(device)

        # --- training-time augmentations ---
        if args.clip_noise > 0:
            clip_features = clip_features + args.clip_noise * torch.randn_like(clip_features)

        # Token dropout: randomly replace non-pad tokens in the decoder input
        # with pad (never position 0, which holds the BOS token).
        if args.token_dropout > 0:
            drop_mask = (torch.rand_like(decoder_input_ids.float()) < args.token_dropout) & (decoder_input_ids != args.pad_token_id)
            drop_mask[:, 0] = False
            decoder_input_ids = decoder_input_ids.masked_fill(drop_mask, args.pad_token_id)

        # Fail fast on out-of-vocabulary ids before the embedding lookup.
        max_tok = int(decoder_input_ids.max().item())
        num_embeddings = model.token_emb.num_embeddings
        if max_tok >= num_embeddings:
            toks_cpu = decoder_input_ids.detach().cpu().numpy()
            bad_vals = toks_cpu[toks_cpu >= num_embeddings]
            print(f"[ERROR] Found token ids >= vocab_size in batch {batch_idx}, first offending: {bad_vals.flatten()[:20].tolist()}")
            raise RuntimeError("Found token ids >= vocab_size; increase --vocab_size or fix tokens.")

        B, T = decoder_input_ids.shape
        causal_mask = model.generate_causal_mask(T, device=device)
        key_padding_mask = ~attention_mask

        logits = model(decoder_input_ids, clip_features, causal_mask=causal_mask, key_padding_mask=key_padding_mask)

        # --- accuracy bookkeeping: single argmax, exact integer counts ---
        # BUG FIX: the previous code rebuilt counts as int(acc * nonpad),
        # which truncated after a float round-trip; accumulate the true
        # integer counts instead. (It also ran argmax twice per batch.)
        preds = logits.argmax(dim=-1)
        nonpad_mask = target_ids != args.pad_token_id
        batch_nonpad = int(nonpad_mask.sum().item())
        batch_correct = int(((preds == target_ids) & nonpad_mask).sum().item())
        batch_token_acc = (batch_correct / batch_nonpad) if batch_nonpad > 0 else 0.0

        # Sequence-level counts: a row is correct only if all its non-pad
        # tokens match; all-pad rows are excluded from the denominator.
        seq_correct = 0
        seq_denom = 0
        for i in range(B):
            m = nonpad_mask[i]
            if int(m.sum().item()) == 0:
                continue
            seq_denom += 1
            if torch.equal(preds[i][m], target_ids[i][m]):
                seq_correct += 1
        batch_seq_acc = (seq_correct / seq_denom) if seq_denom > 0 else 0.0

        token_correct_sum += batch_correct
        nonpad_total += batch_nonpad
        seq_correct_sum += seq_correct
        seq_denom_total += seq_denom

        loss_value, nonpad = compute_loss(logits, target_ids, args.pad_token_id)

        optimizer.zero_grad()
        loss_value.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        # per-batch TensorBoard logging
        writer.add_scalar("train/loss_per_token", loss_value.item(), global_step)
        writer.add_scalar("train/grad_norm", float(grad_norm), global_step)
        writer.add_scalar("train/token_accuracy", batch_token_acc, global_step)
        writer.add_scalar("train/sequence_accuracy", batch_seq_acc, global_step)
        writer.add_scalar("train/batch_token_max", max_tok, global_step)

        total_loss += loss_value.item() * nonpad
        total_tokens += nonpad
        pbar.set_postfix({"loss_per_token": loss_value.item(), "batch_idx": batch_idx,
                          "batch_token_acc": f"{batch_token_acc:.4f}", "batch_seq_acc": f"{batch_seq_acc:.4f}"})
        global_step += 1

    avg_loss = total_loss / max(1, total_tokens)
    avg_token_acc = (token_correct_sum / nonpad_total) if nonpad_total > 0 else 0.0
    avg_seq_acc = (seq_correct_sum / seq_denom_total) if seq_denom_total > 0 else 0.0
    return avg_loss, global_step, avg_token_acc, avg_seq_acc


def eval_epoch(model: nn.Module, dataloader: DataLoader, device, args, writer: SummaryWriter, epoch: int):
    """Run one evaluation pass with gradients disabled.

    Logs epoch-level validation metrics to TensorBoard and returns
    (avg_loss_per_token, avg_token_acc, avg_seq_acc).
    """
    model.eval()
    total_loss = 0.0
    total_tokens = 0

    token_correct_sum = 0
    nonpad_total = 0
    seq_correct_sum = 0
    seq_denom_total = 0

    with torch.no_grad():
        for batch_idx, batch in enumerate(dataloader):
            decoder_input_ids = batch["decoder_input_ids"].to(device)
            target_ids = batch["target_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            clip_features = batch["clip_features"].to(device)

            B, T = decoder_input_ids.shape
            causal_mask = model.generate_causal_mask(T, device=device)
            key_padding_mask = ~attention_mask
            logits = model(decoder_input_ids, clip_features, causal_mask=causal_mask, key_padding_mask=key_padding_mask)
            loss_value, nonpad = compute_loss(logits, target_ids, args.pad_token_id)
            total_loss += loss_value.item() * nonpad
            total_tokens += nonpad

            # BUG FIX: accumulate exact integer correct-token counts instead
            # of int(token_acc * nonpad), which truncated after a float
            # round-trip. Also argmax once per batch instead of twice.
            preds = logits.argmax(dim=-1)
            nonpad_mask = target_ids != args.pad_token_id
            token_correct_sum += int(((preds == target_ids) & nonpad_mask).sum().item())
            nonpad_total += int(nonpad_mask.sum().item())

            # Sequence-level: a row counts only if it has non-pad tokens and
            # every one of them is predicted correctly.
            for i in range(B):
                m = nonpad_mask[i]
                if int(m.sum().item()) == 0:
                    continue
                seq_denom_total += 1
                if torch.equal(preds[i][m], target_ids[i][m]):
                    seq_correct_sum += 1

    avg_loss = total_loss / max(1, total_tokens)
    avg_token_acc = (token_correct_sum / nonpad_total) if nonpad_total > 0 else 0.0
    avg_seq_acc = (seq_correct_sum / seq_denom_total) if seq_denom_total > 0 else 0.0

    writer.add_scalar("val/loss_per_token", avg_loss, epoch)
    writer.add_scalar("val/token_accuracy", avg_token_acc, epoch)
    writer.add_scalar("val/sequence_accuracy", avg_seq_acc, epoch)
    return avg_loss, avg_token_acc, avg_seq_acc


# ----------------------------
# CLI & main
# ----------------------------
def parse_args():
    """Build and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser()
    # data, tokenizer & checkpoints
    parser.add_argument("--tokens_path", type=str, help="Path to token ids (jsonl/.npy/.pt/.txt)", default="./data_process/processed_data/add_suffix_company_input_ids.pt")
    parser.add_argument("--clip_embs_path", type=str, help="Path to clip text embeddings (.npy/.pt)", default="./data_process/processed_data/add_suffix_company_gpt2_text_embeddings.pt")
    parser.add_argument("--tokenizer_name_or_path", type=str, default="/home/fang_guotong/.cache/huggingface/hub/models--gpt2/snapshots/607a30d783dfa663caf39e06633721c8d4cfcd7e", help="HuggingFace tokenizer id or local path (optional)")
    parser.add_argument("--resume_from", type=str, default=None, help="Path to checkpoint to resume from (optional)")
    # model architecture
    parser.add_argument("--vocab_size", type=int, default=50257, help="initial vocab size (auto-expanded if smaller than data)")
    parser.add_argument("--clip_emb_dim", type=int, default=768)
    parser.add_argument("--d_model", type=int, default=256)
    parser.add_argument("--num_layers", type=int, default=3)
    parser.add_argument("--nhead", type=int, default=4)
    parser.add_argument("--dim_feedforward", type=int, default=1024)
    parser.add_argument("--max_len", type=int, default=64)
    parser.add_argument("--pad_token_id", type=int, default=0)
    # optimization
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--lr", type=float, default=5e-5)
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--grad_clip", type=float, default=1.0)
    parser.add_argument("--weight_decay", type=float, default=0.05)
    # augmentation
    parser.add_argument("--token_dropout", type=float, default=0.1, help="Probability to drop tokens in decoder input during training")
    parser.add_argument("--clip_noise", type=float, default=0.01, help="Std dev of gaussian noise added to clip embeddings during training")
    # run environment & logging
    parser.add_argument("--save_dir", type=str, default="outputs")
    parser.add_argument("--log_dir", type=str, default="outputs/runs")
    parser.add_argument("--device", type=str, default="cuda:1" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--val_split", type=float, default=0.05)
    parser.add_argument("--samples_per_epoch", type=int, default=4, help="How many samples (train+val) to log each epoch")
    return parser.parse_args()


def set_seed(seed: int):
    """Seed the python, numpy and torch RNGs (including all CUDA devices)."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


def main():
    """Script entry point.

    Loads the caption dataset and (optionally) a HuggingFace tokenizer,
    builds the CLIP-conditioned decoder, then trains for args.epochs with
    per-epoch validation, TensorBoard logging, checkpointing, and decoded
    sample logging.
    """
    args = parse_args()
    os.makedirs(args.save_dir, exist_ok=True)
    os.makedirs(args.log_dir, exist_ok=True)
    set_seed(args.seed)

    print("Loading dataset...")
    dataset = CaptionDataset(args.tokens_path, args.clip_embs_path)
    # Grow vocab_size if the data contains token ids beyond the configured table.
    data_max_token = getattr(dataset, "_max_token_id", -1)
    if data_max_token >= 0:
        needed = data_max_token + 1
        if needed > args.vocab_size:
            print(f"[WARN] args.vocab_size ({args.vocab_size}) too small for data (max token id {data_max_token}). Auto-expand -> {needed}")
            args.vocab_size = needed

    n_total = len(dataset)
    n_val = max(1, int(args.val_split * n_total))
    n_train = n_total - n_val

    print(f"Total {n_total} captions -> train {n_train}, val {n_val}")

    # Load tokenizer if requested (raises if transformers is missing or the
    # path is wrong -- intentional: this script avoids try/except).
    tokenizer = None
    if args.tokenizer_name_or_path is not None:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path, use_fast=True)
        print(f"[INFO] Loaded tokenizer from {args.tokenizer_name_or_path}. tokenizer.vocab_size={getattr(tokenizer,'vocab_size', 'N/A')}")

    # train/val split
    train_set, val_set = torch.utils.data.random_split(dataset, [n_train, n_val])

    # collate must be defined after tokenizer known (to pass bos_token_id);
    # fall back BOS -> EOS -> pad, matching greedy_decode's resolution order.
    bos_id = None
    if tokenizer is not None and getattr(tokenizer, "bos_token_id", None) is not None:
        bos_id = tokenizer.bos_token_id
    elif tokenizer is not None and getattr(tokenizer, "eos_token_id", None) is not None:
        bos_id = tokenizer.eos_token_id
    else:
        bos_id = args.pad_token_id

    collate = lambda batch: collate_fn(batch, pad_token_id=args.pad_token_id, max_len=args.max_len, bos_token_id=bos_id)

    num_workers = 2
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, collate_fn=collate, num_workers=num_workers)
    val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, collate_fn=collate, num_workers=num_workers)

    # Guard: the pad id must index into the embedding table.
    if args.pad_token_id >= args.vocab_size:
        print(f"[WARN] pad_token_id ({args.pad_token_id}) >= vocab_size ({args.vocab_size}), resetting pad_token_id to 0.")
        args.pad_token_id = 0

    model = ClipConditionedTransformerDecoder(
        vocab_size=args.vocab_size,
        d_model=args.d_model,
        nhead=args.nhead,
        num_layers=args.num_layers,
        dim_feedforward=args.dim_feedforward,
        clip_emb_dim=args.clip_emb_dim,
        max_len=args.max_len,
        dropout=0.3,
        pad_token_id=args.pad_token_id
    ).to(args.device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=max(1, args.epochs))

    # optionally resume
    # NOTE(review): resume restores model/optimizer and the epoch counter but
    # not the scheduler state or global_step -- the LR schedule and the TB
    # per-batch step restart from zero; confirm this is intended.
    start_epoch = 1
    if args.resume_from is not None and os.path.exists(args.resume_from):
        print(f"[INFO] Resuming from {args.resume_from}")
        ckpt = torch.load(args.resume_from, map_location="cpu")
        model.load_state_dict(ckpt.get("model_state", ckpt), strict=False)
        if "optimizer_state" in ckpt:
            optimizer.load_state_dict(ckpt["optimizer_state"])
            print("[INFO] optimizer state loaded.")
        start_epoch = ckpt.get("epoch", 0) + 1

    writer = SummaryWriter(log_dir=args.log_dir)
    writer.add_text("run/args", json.dumps(vars(args), indent=2))

    best_val = float("inf")
    global_step = 0
    for epoch in range(start_epoch, args.epochs + 1):
        print(f"Epoch {epoch}/{args.epochs}")
        train_loss, global_step, train_token_acc, train_seq_acc = train_epoch(model, train_loader, optimizer, args.device, args, writer, global_step)
        val_loss, val_token_acc, val_seq_acc = eval_epoch(model, val_loader, args.device, args, writer, epoch)
        print(f"  train_loss={train_loss:.6f}   val_loss={val_loss:.6f}")
        print(f"  train_token_acc={train_token_acc:.4f}   train_seq_acc={train_seq_acc:.4f}")
        print(f"  val_token_acc={val_token_acc:.4f}     val_seq_acc={val_seq_acc:.4f}")

        # epoch-level TensorBoard scalars
        writer.add_scalar("epoch/train_loss", train_loss, epoch)
        writer.add_scalar("epoch/val_loss", val_loss, epoch)
        writer.add_scalar("epoch/train_token_acc", train_token_acc, epoch)
        writer.add_scalar("epoch/train_seq_acc", train_seq_acc, epoch)
        writer.add_scalar("epoch/val_token_acc", val_token_acc, epoch)
        writer.add_scalar("epoch/val_seq_acc", val_seq_acc, epoch)

        writer.add_histogram("model/token_embedding", model.token_emb.weight.data.cpu().numpy(), epoch)

        # checkpoint (every epoch, plus a rolling best-by-val-loss copy)
        ckpt = {
            "model_state": model.state_dict(),
            "optimizer_state": optimizer.state_dict(),
            "args": vars(args),
            "epoch": epoch
        }
        torch.save(ckpt, os.path.join(args.save_dir, f"add_suffix_ckpt_epoch{epoch}.pt"))
        if val_loss < best_val:
            best_val = val_loss
            torch.save(ckpt, os.path.join(args.save_dir, f"ckpt_best.pt"))
            print("  saved best checkpoint.")

        # step scheduler
        scheduler.step()

        # --- log readable decoded samples from both validation and training splits ---
        # NOTE(review): samples_to_log is capped by n_val but also used for the
        # train pool -- assumes n_train >= n_val; confirm.
        samples_to_log = min(args.samples_per_epoch, n_val)
        # gather pool indices respecting Subset semantics
        if hasattr(val_set, "indices"):
            val_pool = val_set.indices
        else:
            val_pool = list(range(n_train, n_train + n_val))
        if hasattr(train_set, "indices"):
            train_pool = train_set.indices
        else:
            train_pool = list(range(0, n_train))

        sampled_val = random.sample(list(val_pool), samples_to_log)
        sampled_train = random.sample(list(train_pool), samples_to_log)

        def decode_indices(indices, label):
            # Greedy-decode each sampled index and format prediction vs. ground truth.
            texts = []
            for idx in indices:
                clip_emb_np = dataset.clip_embeddings[idx]
                clip_emb_t = torch.tensor(clip_emb_np, dtype=torch.float32)
                gen_ids, gen_text = greedy_decode(
                    model, clip_emb_t, max_len=args.max_len, device=args.device,
                    start_token_id=bos_id,
                    eos_token_id=(tokenizer.eos_token_id if tokenizer is not None else None),
                    tokenizer=tokenizer
                )
                # If the tokenizer is unavailable, fall back to the raw id string.
                if (gen_text is None or gen_text == ""):
                    if tokenizer is not None:
                        gen_text = ""
                    else:
                        gen_text = " ".join([str(x) for x in gen_ids])
                gt_ids = dataset.token_sequences[idx]
                gt_text = None
                if tokenizer is not None:
                    gt_text = tokenizer.decode(gt_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                else:
                    gt_text = " ".join([str(x) for x in gt_ids])
                entry = f"{label}_sample_index={idx}\npred_ids={gen_ids}\npred_text={gen_text}\ntrue_ids={gt_ids}\ntrue_text={gt_text}\n------"
                texts.append(entry)
            return texts

        val_texts = decode_indices(sampled_val, "val")
        train_texts = decode_indices(sampled_train, "train")
        writer.add_text(f"decoded/val_epoch_{epoch}", "\n".join(val_texts), epoch)
        writer.add_text(f"decoded/train_epoch_{epoch}", "\n".join(train_texts), epoch)

    print("Training finished.")
    writer.flush()
    writer.close()


if __name__ == "__main__":
    main()
