# src/hfco/modeling_transformer.py

import torch
from typing import Optional, Dict

from transformers import PreTrainedModel, GenerationMixin
from transformers.modeling_outputs import Seq2SeqLMOutput, BaseModelOutput
from .configuration_transformer import TransformerConfig
from .components import Transformer_Seq2Seq


class TranslationTransformerSeq2Seq(PreTrainedModel, GenerationMixin):
    """Hugging Face wrapper for a custom Seq2Seq translation Transformer.

    Bridges the project-local ``Transformer_Seq2Seq`` module into the
    ``transformers`` ecosystem so it works with ``save_pretrained`` /
    ``from_pretrained``, ``Trainer`` loss computation, and ``generate()``.
    """

    config_class = TransformerConfig

    def __init__(self, config: TransformerConfig):
        super().__init__(config)

        # Instantiate the Seq2Seq model as a submodule.
        self.model = Transformer_Seq2Seq(
            src_vocab_size=config.src_vocab_size,
            tgt_vocab_size=config.tgt_vocab_size,
            d_model=config.d_model,
            num_layers=config.num_layers,
            num_heads=config.num_heads,
            d_ff=config.d_ff,
            max_seq_len=config.max_seq_len,
            rope_theta=config.rope_theta,
            share_embeddings=config.share_embeddings,
            use_rope=config.use_rope,
            use_residual=config.use_residual,
            norm_type=config.norm_type,
            norm_position=config.norm_position,
        )

        if config.share_embeddings:
            # Custom weight tying for three-way sharing.
            # This informs `save_pretrained` about the shared tensors.
            # The format should be a flat list of strings.
            self._tied_weights_keys = [
                "model.src_embedding.weight",
                "model.tgt_embedding.weight",
                "model.output_projection.weight",
            ]

        # Run the standard HF post-init hooks (weight init, tying, etc.).
        self.post_init()

        # Make sure a generation_config exists so `generate()` works even
        # when the model was constructed directly rather than loaded.
        if getattr(self, "generation_config", None) is None:
            from transformers import GenerationConfig
            self.generation_config = GenerationConfig.from_model_config(self.config)

    def _shift_right(self, labels: torch.LongTensor) -> torch.LongTensor:
        """Derive decoder inputs from labels, HF seq2seq style.

        Shifts `labels` one position to the right, prepends
        `config.decoder_start_token_id`, and replaces the -100 loss-ignore
        sentinel with the pad token so the embedding lookup stays valid.
        """
        start_id = getattr(self.config, "decoder_start_token_id", None)
        if start_id is None:
            raise ValueError(
                "config.decoder_start_token_id must be set to derive "
                "decoder_input_ids from labels."
            )
        shifted = labels.new_zeros(labels.shape)
        shifted[:, 1:] = labels[:, :-1].clone()
        shifted[:, 0] = start_id
        pad_id = getattr(self.config, "pad_token_id", None)
        if pad_id is not None:
            # -100 is CrossEntropyLoss's ignore index, not a real token id.
            shifted.masked_fill_(shifted == -100, pad_id)
        return shifted

    def forward(
        self,
        input_ids: torch.LongTensor,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        # Accepted for compatibility with HF `generate()`.
        encoder_outputs: Optional[Dict[str, torch.Tensor]] = None,
        past_key_values: Optional[Dict[str, torch.Tensor]] = None,  # use_cache support
        use_cache: Optional[bool] = None,
        return_dict: Optional[bool] = True,
        **kwargs,
    ) -> Seq2SeqLMOutput:
        """Run the encoder-decoder forward pass.

        Returns a ``Seq2SeqLMOutput`` (or a ``(loss, logits)`` / ``(logits,)``
        tuple when ``return_dict=False``). ``loss`` is populated only when
        ``labels`` is given.
        """
        if decoder_input_ids is None:
            if labels is not None:
                # Standard HF Trainer usage: only labels are supplied, so
                # build teacher-forcing inputs by shifting labels right.
                decoder_input_ids = self._shift_right(labels)
            else:
                # During generation HF supplies decoder_input_ids via
                # decoder_start_token_id, so reaching here is a caller error.
                raise ValueError("decoder_input_ids must be provided for training/eval; for generation, HF will supply them via decoder_start_token_id.")

        # Convert HF attention masks (1 = keep) into PyTorch-style
        # key-padding masks (True = padded/ignored).
        src_padding_mask = None
        if attention_mask is not None:
            src_padding_mask = attention_mask == 0

        tgt_padding_mask = None
        if decoder_attention_mask is not None:
            tgt_padding_mask = decoder_attention_mask == 0

        # Extract precomputed encoder hidden states, tolerating both dict
        # and ModelOutput containers (different HF versions differ here).
        enc_hidden = None
        if encoder_outputs is not None:
            if isinstance(encoder_outputs, dict):
                if "last_hidden_state" in encoder_outputs:
                    enc_hidden = encoder_outputs["last_hidden_state"]
                elif "encoder_outputs" in encoder_outputs:
                    enc_hidden = encoder_outputs["encoder_outputs"]
                elif "encoder_last_hidden_state" in encoder_outputs:
                    enc_hidden = encoder_outputs["encoder_last_hidden_state"]
            else:
                # ModelOutput-compatible object.
                enc_hidden = getattr(encoder_outputs, "last_hidden_state", None)

        model_outputs = self.model(
            src_ids=input_ids,
            tgt_ids=decoder_input_ids,
            src_key_padding_mask=src_padding_mask,
            tgt_key_padding_mask=tgt_padding_mask,
            encoder_hidden_states=enc_hidden,
            use_cache=use_cache,
            past_key_values=past_key_values,
        )
        logits = model_outputs["logits"]

        loss = None
        if labels is not None:
            label_smoothing = getattr(self.config, "label_smoothing", 0.0)
            loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-100, label_smoothing=label_smoothing)
            # reshape (not view): logits may be non-contiguous.
            loss = loss_fct(logits.reshape(-1, logits.size(-1)), labels.reshape(-1))

        if not return_dict:
            # HF tuple convention: omit the loss slot when no loss was computed.
            output = (logits,)
            return (loss,) + output if loss is not None else output

        return Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=model_outputs.get("past_key_values"),
            encoder_last_hidden_state=model_outputs.get("encoder_outputs"),
        )

    def get_encoder(self):
        """Return an encoder module HF `generate()` can call once up-front
        to compute and cache the encoder output."""
        class _EncoderWrapper(torch.nn.Module):
            def __init__(self, inner):
                super().__init__()
                self.inner = inner

            def forward(self, input_ids=None, attention_mask=None, **kwargs):
                src_padding_mask = None
                if attention_mask is not None:
                    # HF mask (1 = keep) -> key-padding mask (True = padded).
                    src_padding_mask = attention_mask == 0
                enc = self.inner.model.encode(input_ids, src_key_padding_mask=src_padding_mask)
                return BaseModelOutput(last_hidden_state=enc)

        return _EncoderWrapper(self)

    def _prepare_encoder_decoder_kwargs_for_generation(self, inputs_tensor, model_kwargs, model_input_name=None, generation_config=None, **kwargs):
        """HF generation hook: precompute `encoder_outputs` once and stash
        them in model_kwargs so decoding steps reuse them."""
        if model_kwargs.get("encoder_outputs", None) is None:
            encoder = self.get_encoder()
            encoder_outputs = encoder(inputs_tensor, attention_mask=model_kwargs.get("attention_mask", None))
            model_kwargs["encoder_outputs"] = encoder_outputs
        return model_kwargs

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        encoder_outputs=None,
        **kwargs
    ):
        """HF generation hook: shape the per-step decoder inputs."""
        if past_key_values is not None:
            # With a KV cache, only the newest token needs to be decoded.
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "input_ids": None,  # encoder output is cached; no source ids needed
            "decoder_input_ids": decoder_input_ids,
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "attention_mask": attention_mask,
            "use_cache": kwargs.get("use_cache", True),
        }
