"""
训练相关的工具函数和数据处理类
"""

import logging
import numpy as np
import os
import random
from datetime import datetime
from typing import Dict, List, Optional

import torch
import sacrebleu
from tokenizers import Tokenizer
from torch.nn.utils.rnn import pad_sequence


def setup_logger(name: str, level: int = logging.INFO) -> logging.Logger:
    """Return a named logger, installing the shared root config on first use.

    `logging.basicConfig` is a no-op once the root logger has handlers, so
    calling this repeatedly is harmless.
    """
    log = logging.getLogger(name)
    if log.handlers:
        return log
    logging.basicConfig(
        level=level,
        handlers=[logging.StreamHandler()],
        datefmt="%m/%d/%Y %H:%M:%S",
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
    )
    return log


def decode_tokens(tokenizer, token_ids: np.ndarray, pad_id: int, bos_id: int, eos_id: int) -> str:
    """Decode token IDs to text, skipping pad/BOS/EOS tokens.

    Args:
        tokenizer: Object exposing a ``decode(list[int]) -> str`` method.
        token_ids: Sequence of token IDs (numpy array or any iterable of ints).
        pad_id: Padding token ID to drop.
        bos_id: Beginning-of-sequence token ID to drop.
        eos_id: End-of-sequence token ID to drop.

    Returns:
        Decoded text, or "" if nothing remains after filtering.
    """
    special = {pad_id, bos_id, eos_id}
    # int() works for numpy scalars and plain Python ints alike; the previous
    # .item() call crashed when a plain list was passed instead of an ndarray.
    tokens = [int(t) for t in token_ids if int(t) not in special]

    if not tokens:
        return ""

    return tokenizer.decode(tokens)


def calc_bleu_score(
    preds: np.ndarray,
    labels: np.ndarray,
    tokenizer: Tokenizer,
    pad_token_id: int,
    bos_token_id: int,
    eos_token_id: int
) -> float:
    """Compute the corpus BLEU score for a batch of predictions vs. labels.

    Labels use -100 for ignored positions (HF convention); those are mapped
    back to the pad ID before decoding. Both sides are truncated at the first
    EOS and stripped of special tokens before text decoding.
    """
    if isinstance(preds, tuple):
        preds = preds[0]
    # -100 marks loss-ignored positions; restore pad so decoding is valid.
    labels = np.where(labels == -100, pad_token_id, labels)

    specials = (pad_token_id, bos_token_id, eos_token_id)

    def _clean(row):
        # Truncate at the first EOS, then drop any remaining special tokens.
        ids = list(row)
        if eos_token_id in ids:
            ids = ids[: ids.index(eos_token_id)]
        return [tok for tok in ids if tok not in specials]

    pred_ids = [_clean(r) for r in np.asarray(preds, dtype=np.int64).tolist()]
    label_ids = [_clean(r) for r in np.asarray(labels, dtype=np.int64).tolist()]

    # tokenizers.Tokenizer batch decode; specials were already removed above.
    hyp_texts = [t.strip() for t in tokenizer.decode_batch(pred_ids, skip_special_tokens=False)]
    ref_texts = [t.strip() for t in tokenizer.decode_batch(label_ids, skip_special_tokens=False)]

    # sacrebleu expects hypotheses plus a list of reference lists.
    bleu = sacrebleu.corpus_bleu(hyp_texts, [[ref] for ref in ref_texts])
    return bleu.score


class Seq2SeqDataCollator:
    """Collate variable-length src/tgt token tensors into padded seq2seq
    training batches with teacher-forcing decoder inputs and -100 masked
    labels.
    """

    def __init__(self, pad_token_id: int, label_pad_token_id: int = -100):
        self.pad_token_id = pad_token_id
        self.label_pad_token_id = label_pad_token_id

    def __call__(self, features: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        # Features arrive as torch tensors already (dataset uses
        # `set_format(type="torch")`), so no conversion is needed.
        pad = self.pad_token_id
        input_ids = pad_sequence(
            [example["src_ids"] for example in features],
            batch_first=True,
            padding_value=pad,
        )
        tgt_padded = pad_sequence(
            [example["tgt_ids"] for example in features],
            batch_first=True,
            padding_value=pad,
        )

        # Teacher forcing: decoder reads tgt[:-1], loss targets are tgt[1:].
        decoder_input_ids = tgt_padded[:, :-1]
        labels = tgt_padded[:, 1:].contiguous()
        # Padded label positions are ignored by the loss via -100.
        labels.masked_fill_(labels == pad, self.label_pad_token_id)

        return {
            "input_ids": input_ids,
            "attention_mask": (input_ids != pad).long(),
            "decoder_input_ids": decoder_input_ids,
            "labels": labels,
            "decoder_attention_mask": (decoder_input_ids != pad).long(),
        }


class SampleGenerator:
    """Generates translation samples from a validation set and writes them to
    a text file for qualitative inspection during training."""

    def __init__(self, model, tokenizer, val_dataset, pad_token_id: int,
                 bos_token_id: int, eos_token_id: int, num_samples: int = 10):
        self.model = model
        self.tokenizer = tokenizer
        self.val_dataset = val_dataset
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_samples = num_samples

    def generate_and_save_samples(self, output_dir: str, current_step: int) -> str:
        """Translate random validation samples and write them to a file.

        Args:
            output_dir: Directory for the sample file (created if missing).
            current_step: Training step, used in the file name and header.

        Returns:
            Path of the written sample file.
        """
        os.makedirs(output_dir, exist_ok=True)
        sample_filepath = os.path.join(output_dir, f"samples_step_{current_step}.txt")

        # Pick up to num_samples distinct validation indices. Uses the
        # module-level `random` import; the previous function-local
        # `import random` was redundant.
        indices = random.sample(range(len(self.val_dataset)),
                                min(self.num_samples, len(self.val_dataset)))

        # Generation is inference-only: switch to eval mode once for the whole
        # loop instead of toggling per sample, and restore train mode even if
        # generation or file I/O raises.
        self.model.eval()
        try:
            with open(sample_filepath, "w", encoding="utf-8") as f:
                f.write(f"=== 翻译样本 - Step {current_step}\t"
                        f"Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")

                for i, idx in enumerate(indices):
                    val_item = self.val_dataset[idx]
                    src_ids = val_item["src_ids"].unsqueeze(0).to(self.model.device)
                    tgt_ids = val_item["tgt_ids"].unsqueeze(0).to(self.model.device)

                    # Encoder attention mask: 1 for real tokens, 0 for padding.
                    src_attention_mask = (src_ids != self.pad_token_id).long().to(self.model.device)

                    generated_ids = self.model.generate(
                        input_ids=src_ids,
                        attention_mask=src_attention_mask,
                        max_length=self.model.generation_config.max_length,
                        num_beams=self.model.generation_config.num_beams,
                        pad_token_id=self.pad_token_id,
                        bos_token_id=self.bos_token_id,
                        eos_token_id=self.eos_token_id,
                        decoder_start_token_id=self.bos_token_id,
                        min_new_tokens=self.model.generation_config.min_new_tokens,
                    )

                    # Decode source, reference and prediction to plain text.
                    src_text = decode_tokens(self.tokenizer, src_ids[0].cpu().numpy(),
                                             self.pad_token_id, self.bos_token_id, self.eos_token_id)
                    tgt_text = decode_tokens(self.tokenizer, tgt_ids[0].cpu().numpy(),
                                             self.pad_token_id, self.bos_token_id, self.eos_token_id)
                    pred_text = decode_tokens(self.tokenizer, generated_ids[0].cpu().numpy(),
                                              self.pad_token_id, self.bos_token_id, self.eos_token_id)

                    f.write(f"样本 {i+1}:\n")
                    f.write(f"源: {src_text}\n")
                    f.write(f"目标: {tgt_text}\n")
                    f.write(f"预测: {pred_text}\n")
                    f.write("-" * 50 + "\n")
                f.write("\n" + "=" * 80 + "\n\n")
        finally:
            # NOTE(review): as in the original code, this assumes the model was
            # in training mode before the call — confirm against the trainer.
            self.model.train()

        return sample_filepath