import einops
import torch
from torch import Tensor
from torch.nn import Module, Parameter, Dropout
from torch.nn import init
from jaxtyping import Float, Int, Bool


class Linear(Module):
    """Affine projection ``y = x @ W^T (+ b)``, mirroring ``torch.nn.Linear``.

    The weight is stored as ``(d_out, d_in)`` and Xavier-uniform initialized;
    the optional bias is zero-initialized.
    """

    def __init__(
        self,
        d_in: int,
        d_out: int,
        bias: bool = False,
        device=None,
        dtype=None,
    ) -> None:
        super().__init__()
        self.weight = Parameter(torch.empty(d_out, d_in, dtype=dtype, device=device))
        if bias:
            # Bug fix: the bias previously ignored `dtype`, so e.g. a bf16
            # layer would carry a float32 bias inconsistent with its weight.
            self.bias = Parameter(torch.empty(d_out, dtype=dtype, device=device))
        else:
            self.register_parameter("bias", None)
        self._initialize_weights()

    def _initialize_weights(self):
        """Xavier-uniform weight; zero bias (if present)."""
        init.xavier_uniform_(self.weight)
        if self.bias is not None:
            init.zeros_(self.bias)

    def forward(self, x: Tensor) -> Tensor:
        """Project the last dimension of `x`: ``[..., d_in] -> [..., d_out]``."""
        out = torch.matmul(x, self.weight.T)
        if self.bias is not None:
            out = out + self.bias
        return out


class Embedding(Module):
    """Token-id -> vector lookup table with sqrt(d) output scaling.

    Rows are drawn from N(0, 1/embedding_dim) and the lookup result is
    multiplied by sqrt(embedding_dim), as in the original Transformer.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        table = torch.empty((num_embeddings, embedding_dim), device=device, dtype=dtype)
        self.weight = Parameter(table)
        self._initialize_weights()

    def _initialize_weights(self):
        """Gaussian init with std = embedding_dim ** -0.5."""
        std = self.embedding_dim ** -0.5
        torch.nn.init.normal_(self.weight, mean=0, std=std)

    def forward(self, x: Tensor) -> Tensor:
        """Look up rows for ids `x` and rescale by sqrt(embedding_dim)."""
        scale = self.embedding_dim ** 0.5
        return self.weight[x] * scale


class RMSNorm(Module):
    """Root-mean-square normalization with a learnable gain (no bias)."""

    def __init__(self, d_model: int, eps: float = 1e-5, device=None, dtype=None):
        super().__init__()
        self.d_model = d_model
        self.eps = eps
        self.weight = Parameter(torch.ones(d_model, device=device, dtype=dtype))

    def forward(self, x: Tensor) -> Tensor:
        """Normalize the last dimension by its RMS; computed in float32."""
        orig_dtype = x.dtype
        x32 = x.to(dtype=torch.float32)
        rms = torch.sqrt(x32.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        normed = x32 / rms * self.weight
        return normed.to(orig_dtype)


class LayerNorm(Module):
    """LayerNorm with learnable gain and bias, as in the original 2017 Transformer."""

    def __init__(self, d_model: int, eps: float = 1e-5, device=None, dtype=None):
        super().__init__()
        self.d_model = d_model
        self.eps = eps
        self.weight = Parameter(torch.ones(d_model, device=device, dtype=dtype))
        self.bias = Parameter(torch.zeros(d_model, device=device, dtype=dtype))

    def forward(self, x: Tensor) -> Tensor:
        """Standardize the last dim (biased variance), then scale and shift; computed in float32."""
        orig_dtype = x.dtype
        x32 = x.to(dtype=torch.float32)

        mu = x32.mean(dim=-1, keepdim=True)
        sigma2 = x32.var(dim=-1, keepdim=True, unbiased=False)
        normed = (x32 - mu) / torch.sqrt(sigma2 + self.eps)
        return (normed * self.weight + self.bias).to(orig_dtype)


class SiLU(Module):
    """SiLU / Swish activation: ``x * sigmoid(x)``."""

    def __init__(self):
        super().__init__()

    def forward(self, x: Tensor) -> Tensor:
        # x * sigmoid(x) instead of x / (1 + exp(-x)): torch.sigmoid never
        # overflows, while exp(-x) overflows to inf for very negative x in
        # low precision (fp16/bf16), collapsing the small negative tail to -0.
        return x * torch.sigmoid(x)


class SwiGLU(Module):
    """SwiGLU feed-forward block: ``w2(dropout(silu(w1(x)) * w3(x)))``."""

    def __init__(
        self, d_model: int, d_ff: int, dropout: float = 0.1, device=None, dtype=None
    ):
        super().__init__()
        self.d_model = d_model
        self.d_ff = d_ff
        common = dict(bias=False, device=device, dtype=dtype)
        self.w1 = Linear(d_model, d_ff, **common)
        self.w2 = Linear(d_ff, d_model, **common)
        self.w3 = Linear(d_model, d_ff, **common)
        self.gate = SiLU()
        self.dropout = Dropout(dropout)

    def forward(self, x: Float[Tensor, "... dim"]) -> Float[Tensor, "... dim"]:
        """Gated feed-forward: the SiLU branch of w1 gates the linear w3 branch."""
        gated = self.gate(self.w1(x)) * self.w3(x)
        return self.w2(self.dropout(gated))


class RoPE(Module):
    def __init__(self, theta: float, d_k: int, max_seq_len: int, device=None):
        super().__init__()
        self.theta = theta
        self.d_k = d_k
        self.max_seq_len = max_seq_len
        freqs = 1.0 / (
            self.theta
            ** (
                torch.arange(0, self.d_k, 2, dtype=torch.float32)[: (self.d_k + 1) // 2]
                / self.d_k
            )
        )
        self.register_buffer("freqs", freqs, persistent=False)
        self.to(device)

    def forward(
        self,
        x: Float[Tensor, "... seq_len d_k"],
        token_positions: Float[Tensor, "... seq_len"],
    ) -> Float[Tensor, "... seq_len d_k"]:
        # 确保token_positions在与x相同的设备上
        token_positions = token_positions.to(x.device)

        # 处理不同形状的token_positions
        # 如果x的维度比token_positions多（多头注意力情况），需要扩展token_positions
        if x.dim() > token_positions.dim():
            # 例如：x是[batch, heads, seq_len, d_k]，token_positions是[batch, seq_len]
            # 需要将token_positions扩展为[batch, heads, seq_len]
            # 获取x的形状
            x_shape = x.shape
            # 在token_positions的倒数第二个维度添加一个维度，然后扩展
            token_positions = token_positions.unsqueeze(-2)
            # 扩展到与x的形状匹配（除了最后一个维度d_k）
            expand_shape = x_shape[:-1]  # 去掉最后一个维度d_k
            token_positions = token_positions.expand(expand_shape)

        assert x.shape[-1] % 2 == 0

        freq2pos = (
            token_positions[..., None] * self.freqs
        )  # pyright: ignore[reportOperatorIssue]
        cos = freq2pos.cos()
        sin = freq2pos.sin()

        # 奇数偶数位置
        x_even = x[..., 0::2]
        x_odd = x[..., 1::2]
        x_rope = torch.empty_like(x)
        x_rope[..., 0::2] = x_even * cos - x_odd * sin
        x_rope[..., 1::2] = x_even * sin + x_odd * cos
        return x_rope


class Softmax(Module):
    """Numerically stable softmax along an arbitrary dimension."""

    def __init__(self):
        super().__init__()

    def forward(self, x: Tensor, dim: int) -> Tensor:
        """Return softmax of `x` along `dim`.

        Fixes over the previous version:
        - removed the final clamp(min=1e-12, max=1.0), which gave fully
          masked positions a nonzero probability and made rows sum to
          slightly more than 1;
        - removed the NaN/inf fallback to a uniform distribution, which
          silently hid upstream numerical bugs instead of surfacing them.
        """
        # Subtract the per-slice max so exp() cannot overflow.
        shifted = x - x.max(dim=dim, keepdim=True).values
        exps = torch.exp(shifted)
        return exps / exps.sum(dim=dim, keepdim=True)


class ScaledDotProductAttention(Module):
    """Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V."""

    def __init__(self):
        super().__init__()
        self.softmax = Softmax()

    def forward(
        self,
        Q: Float[Tensor, " ... queries d_k"],
        K: Float[Tensor, " ... keys d_k"],
        V: Float[Tensor, " ... values d_v"],
        mask: Bool[Tensor, " ... queries keys"] | None = None,  # HF convention: True = keep
    ) -> Float[Tensor, " ... queries d_v"]:
        """Compute masked scaled dot-product attention over the last two dims."""
        scale = Q.shape[-1] ** -0.5
        scores = torch.matmul(Q, K.transpose(-2, -1)) * scale
        if mask is not None:
            keep = mask.bool()
            # finfo.min rather than -inf avoids precision overflow under bf16.
            scores = scores.masked_fill(~keep, torch.finfo(scores.dtype).min)
        weights = self.softmax.forward(scores, dim=-1)
        return weights @ V


class CrossAttention(Module):
    """Cross-attention: decoder queries attend over encoder key/values."""

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads

        # Q comes from the decoder stream; K and V from the encoder output.
        self.q_proj = Linear(d_model, d_model, bias=False, device=device, dtype=dtype)
        self.k_proj = Linear(d_model, d_model, bias=False, device=device, dtype=dtype)
        self.v_proj = Linear(d_model, d_model, bias=False, device=device, dtype=dtype)
        self.o_proj = Linear(d_model, d_model, bias=False, device=device, dtype=dtype)

        self.attn = ScaledDotProductAttention()
        self.to(device)

    def _split_heads(self, t: Tensor) -> Tensor:
        """[batch, seq, d_model] -> [batch, heads, seq, d_k]."""
        b, s, _ = t.shape
        return t.view(b, s, self.num_heads, self.d_k).transpose(1, 2)

    def forward(
        self,
        query: Float[Tensor, "batch tgt_seq_len d_model"],
        key_value: Float[Tensor, "batch src_seq_len d_model"],
        key_padding_mask: (
            Bool[Tensor, "batch src_seq_len"] | None
        ) = None,  # PyTorch convention: True = padding
    ) -> Float[Tensor, "batch tgt_seq_len d_model"]:
        """Attend `query` over `key_value`, optionally masking padded source positions."""
        attention_mask = None
        if key_padding_mask is not None:
            # Convert PyTorch padding mask (True = pad) to an HF keep-mask
            # broadcastable over heads and query positions.
            attention_mask = (~key_padding_mask)[:, None, None, :]  # [batch, 1, 1, src_seq_len]

        Q = self._split_heads(self.q_proj(query))
        K = self._split_heads(self.k_proj(key_value))
        V = self._split_heads(self.v_proj(key_value))

        heads_out = self.attn(Q, K, V, attention_mask)

        # [batch, heads, tgt_seq, d_k] -> [batch, tgt_seq, d_model]
        b, _, s, _ = heads_out.shape
        merged = heads_out.transpose(1, 2).reshape(b, s, self.d_model)
        return self.o_proj(merged)


class MultiHeadSelfAttention(Module):
    """Multi-head self-attention with optional RoPE and optional causal masking."""

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        max_seq_len: int,
        rope_theta: float = 10000.0,
        use_rope: bool = True,
        causal: bool = True,
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads
        self.d_v = self.d_k
        self.max_seq_len = max_seq_len
        self.use_rope = use_rope
        self.device = device
        self.dtype = dtype
        # ----------------------------
        self.rope = RoPE(rope_theta, self.d_k, self.max_seq_len, device=device)
        self.q_proj, self.k_proj, self.v_proj, self.o_proj = [
            Linear(d_model, d_model, bias=False, device=device, dtype=dtype)
            for _ in range(4)
        ]
        self.attn = ScaledDotProductAttention()
        if causal:
            # Lower-triangular keep-mask (True = attend) for causal decoding.
            causal_mask = torch.tril(
                torch.ones(max_seq_len, max_seq_len, dtype=torch.bool, device=device)
            )
            self.register_buffer("causal_mask", causal_mask, persistent=False)
        else:
            self.register_buffer("causal_mask", None, persistent=False)
        self.to(device)

    def forward(
        self,
        in_features: Float[Tensor, " ... sequence_length d_in"],
        token_positions: Int[Tensor, " ... sequence_length"] | None = None,
        padding_mask: (
            Bool[Tensor, " ... sequence_length"] | None
        ) = None,  # PyTorch convention (True = padding)
    ) -> Float[Tensor, " ... sequence_length d_out"]:
        """Self-attend over `in_features`.

        `token_positions` defaults to 0..seq_len-1 when omitted and RoPE is
        enabled.
        """
        seq_len = in_features.shape[-2]
        # -------------------------------
        # 1. Project and split into heads for batched attention.
        W_q = self.q_proj(in_features)  # [..., seq_len, d_model]
        W_k = self.k_proj(in_features)
        W_v = self.v_proj(in_features)
        # Reshape Q/K/V to (..., num_heads, seq_len, head_dim).
        W_q = einops.rearrange(W_q, "... s (h dk) -> ... h s dk", h=self.num_heads)
        W_k = einops.rearrange(W_k, "... s (h dk) -> ... h s dk", h=self.num_heads)
        W_v = einops.rearrange(W_v, "... s (h dv) -> ... h s dv", h=self.num_heads)
        if self.use_rope:
            if token_positions is None:
                # Bug fix: a None token_positions previously crashed inside
                # RoPE; fall back to sequential positions 0..seq_len-1.
                token_positions = torch.arange(seq_len, device=in_features.device)
            W_q = self.rope(W_q, token_positions)
            W_k = self.rope(W_k, token_positions)
        # -------------------------------
        # 2. Build the combined keep-mask (HF convention: True = attend).
        mask = None
        if self.causal_mask is not None:
            mask = self.causal_mask[:seq_len, :seq_len].clone()  # type: ignore
        if padding_mask is not None:
            # padding_mask is PyTorch style (True = padding); invert it into
            # a keep-mask broadcastable over heads and query positions.
            attention_mask = (
                padding_mask.logical_not().unsqueeze(1).unsqueeze(2)
            )  # [B, 1, 1, seq_len]
            if mask is not None:
                mask = mask & attention_mask
            else:
                mask = attention_mask
        out = self.attn(
            W_q, W_k, W_v, mask=mask
        )  # [..., num_heads, seq_len, head_dim]

        # -------------------------------
        # 3. Merge the heads back together.
        out = einops.rearrange(out, "... h s dv -> ... s (h dv)")
        # -------------------------------
        # 4. Final output projection.
        out = self.o_proj(out)
        return out


class DecoderBlock(Module):
    """Transformer decoder block: causal self-attention, optional cross-attention, SwiGLU FFN.

    Supports pre- or post-normalization and optional residual connections.
    """

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        d_ff: int,
        max_seq_len: int,
        theta: float = 10000.0,
        use_rope: bool = True,
        use_residual: bool = True,
        norm_type: str = "rms",  # "rms" or "layer"
        norm_position: str = "post",  # "pre" or "post"
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        self.d_ff = d_ff
        self.max_seq_len = max_seq_len
        self.theta = theta
        self.use_rope = use_rope
        self.use_residual = use_residual
        self.norm_position = norm_position
        self.device = device
        self.dtype = dtype

        # Pick the normalization layer class.
        norm_cls = RMSNorm if norm_type == "rms" else LayerNorm

        # Causal self-attention over the target sequence.
        self.self_attn = MultiHeadSelfAttention(
            d_model,
            num_heads,
            max_seq_len=max_seq_len,
            rope_theta=theta,
            use_rope=use_rope,
            causal=True,  # decoder uses a causal mask
            device=device,
            dtype=dtype,
        )

        # Cross-attention over the encoder output.
        self.cross_attn = CrossAttention(
            d_model,
            num_heads,
            device=device,
            dtype=dtype,
        )

        self.ffn = SwiGLU(d_model, d_ff, device=device, dtype=dtype)
        self.ln1 = norm_cls(d_model, device=device, dtype=dtype)
        self.ln2 = norm_cls(d_model, device=device, dtype=dtype)
        self.ln3 = norm_cls(d_model, device=device, dtype=dtype)
        self.to(device)

    def _sublayer(self, x, norm, fn):
        """Run one sublayer with the configured norm position and residual."""
        out = fn(norm(x) if self.norm_position == "pre" else x)
        if self.norm_position == "post":
            out = norm(out)
        return x + out if self.use_residual else out

    def forward(
        self,
        x: Float[Tensor, " batch seq_len d_model"],
        token_positions: Int[Tensor, " batch seq_len"] | None = None,
        encoder_output: Float[Tensor, " batch src_seq_len d_model"] | None = None,
        src_key_padding_mask: (
            Bool[Tensor, "batch src_len"] | None
        ) = None,  # PyTorch convention
        tgt_key_padding_mask: (
            Bool[Tensor, "batch seq_len"] | None
        ) = None,  # PyTorch convention
    ) -> Float[Tensor, " batch seq_len d_model"]:
        """Apply self-attention, then (if `encoder_output` given) cross-attention, then the FFN."""
        y = self._sublayer(
            x,
            self.ln1,
            lambda t: self.self_attn(
                t, token_positions, padding_mask=tgt_key_padding_mask
            ),
        )

        # Cross-attention only runs when encoder states are provided.
        if encoder_output is not None:
            y = self._sublayer(
                y,
                self.ln2,
                lambda t: self.cross_attn(
                    t, encoder_output, key_padding_mask=src_key_padding_mask
                ),
            )

        return self._sublayer(y, self.ln3, self.ffn)


class EncoderBlock(Module):
    """Transformer encoder block: bidirectional self-attention + SwiGLU FFN.

    Supports pre- or post-normalization and optional residual connections.
    """

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        d_ff: int,
        max_seq_len: int,
        theta: float = 10000.0,
        use_rope: bool = True,
        use_residual: bool = True,
        norm_type: str = "rms",  # "rms" or "layer"
        norm_position: str = "post",  # "pre" or "post"
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        self.d_ff = d_ff
        self.max_seq_len = max_seq_len
        self.theta = theta
        self.use_rope = use_rope
        self.use_residual = use_residual
        self.norm_position = norm_position
        self.device = device
        self.dtype = dtype

        # Pick the normalization layer class.
        norm_cls = RMSNorm if norm_type == "rms" else LayerNorm

        # ----------------------------
        self.attn = MultiHeadSelfAttention(
            d_model,
            num_heads,
            max_seq_len=max_seq_len,
            rope_theta=theta,
            use_rope=use_rope,
            causal=False,  # encoder attends bidirectionally
            device=device,
            dtype=dtype,
        )
        self.ffn = SwiGLU(d_model, d_ff, device=device, dtype=dtype)
        self.ln1 = norm_cls(d_model, device=device, dtype=dtype)
        self.ln2 = norm_cls(d_model, device=device, dtype=dtype)
        self.to(device)

    def _sublayer(self, x, norm, fn):
        """Run one sublayer with the configured norm position and residual."""
        out = fn(norm(x) if self.norm_position == "pre" else x)
        if self.norm_position == "post":
            out = norm(out)
        return x + out if self.use_residual else out

    def forward(
        self,
        x: Float[Tensor, " batch seq_len d_model"],
        token_positions: Int[Tensor, " batch seq_len"] | None = None,
        padding_mask: (
            Bool[Tensor, " batch seq_len"] | None
        ) = None,  # PyTorch convention
    ) -> Float[Tensor, " batch seq_len d_model"]:
        """Apply self-attention then the feed-forward network."""
        y = self._sublayer(
            x, self.ln1, lambda t: self.attn(t, token_positions, padding_mask)
        )
        return self._sublayer(y, self.ln2, self.ffn)


class DecoderOnly(Module):
    """Decoder-only language model: embeddings, N decoder blocks, final norm, LM head.

    The LM head shares its weight matrix with the token embedding table.
    """

    def __init__(
        self,
        vocab_size: int,
        context_length: int,
        d_model: int,
        num_layers: int,
        num_heads: int,
        d_ff: int,
        rope_theta: float,
        device=None,
        dtype=None,
    ):
        super().__init__()
        self.token_embeddings = Embedding(
            vocab_size, d_model, device=device, dtype=dtype
        )
        self.layers = torch.nn.ModuleList(
            DecoderBlock(
                d_model=d_model,
                num_heads=num_heads,
                d_ff=d_ff,
                max_seq_len=context_length,
                theta=rope_theta,
                device=device,
                dtype=dtype,
            )
            for _ in range(num_layers)
        )
        self.ln_final = RMSNorm(d_model, device=device, dtype=dtype)
        self.lm_head = Linear(
            d_model, vocab_size, bias=False, device=device, dtype=dtype
        )
        # Weight tying: the LM head reuses the embedding table.
        self.lm_head.weight = self.token_embeddings.weight
        self.to(device)

    def forward(
        self,
        in_indices: Int[Tensor, " batch_size sequence_length"],
        padding_mask: (
            Bool[Tensor, " batch_size sequence_length"] | None
        ) = None,  # PyTorch convention
    ) -> Float[Tensor, " batch_size sequence_length vocab_size"]:
        """Return next-token logits for every position of `in_indices`."""
        seq_len = in_indices.shape[-1]
        positions = torch.arange(seq_len, device=in_indices.device).unsqueeze(0)
        hidden = self.token_embeddings(in_indices)
        for block in self.layers:
            hidden = block(hidden, positions, tgt_key_padding_mask=padding_mask)
        return self.lm_head(self.ln_final(hidden))


class Transformer_Seq2Seq(Module):
    """Encoder-decoder Transformer for sequence-to-sequence tasks.

    Blocks use optional RoPE positions, SwiGLU feed-forward layers, and
    RMSNorm or LayerNorm in a pre- or post-norm arrangement. `generate`
    decodes with beam search.
    """

    def __init__(
        self,
        src_vocab_size: int,
        tgt_vocab_size: int,
        d_model: int,
        num_layers: int,
        num_heads: int,
        d_ff: int,
        max_seq_len: int,
        rope_theta: float = 10000.0,
        share_embeddings: bool = False,
        use_rope: bool = True,
        use_residual: bool = True,
        norm_type: str = "rms",
        norm_position: str = "post",
        device=None,
        dtype=None,
    ):
        super().__init__()

        self.src_vocab_size = src_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        self.d_model = d_model
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.d_ff = d_ff
        self.max_seq_len = max_seq_len
        self.rope_theta = rope_theta

        # Pick the normalization layer class.
        NormClass = RMSNorm if norm_type == "rms" else LayerNorm

        # Encoder side
        self.src_embedding = Embedding(
            src_vocab_size, d_model, device=device, dtype=dtype
        )
        self.encoder_layers = torch.nn.ModuleList(
            [
                EncoderBlock(
                    d_model=d_model,
                    num_heads=num_heads,
                    d_ff=d_ff,
                    max_seq_len=max_seq_len,
                    theta=rope_theta,
                    use_rope=use_rope,
                    use_residual=use_residual,
                    norm_type=norm_type,
                    norm_position=norm_position,
                    device=device,
                    dtype=dtype,
                )
                for _ in range(num_layers)
            ]
        )
        self.encoder_norm = NormClass(d_model, device=device, dtype=dtype)

        # Decoder side
        if share_embeddings and src_vocab_size == tgt_vocab_size:
            # Share the embedding table between encoder and decoder.
            self.tgt_embedding = self.src_embedding
        else:
            self.tgt_embedding = Embedding(
                tgt_vocab_size, d_model, device=device, dtype=dtype
            )

        self.decoder_layers = torch.nn.ModuleList(
            [
                DecoderBlock(
                    d_model=d_model,
                    num_heads=num_heads,
                    d_ff=d_ff,
                    max_seq_len=max_seq_len,
                    theta=rope_theta,
                    use_rope=use_rope,
                    use_residual=use_residual,
                    norm_type=norm_type,
                    norm_position=norm_position,
                    device=device,
                    dtype=dtype,
                )
                for _ in range(num_layers)
            ]
        )
        self.decoder_norm = NormClass(d_model, device=device, dtype=dtype)

        # Output projection to target-vocabulary logits.
        self.output_projection = Linear(
            d_model, tgt_vocab_size, bias=False, device=device, dtype=dtype
        )

        # With shared embeddings, also tie the output projection weights.
        if share_embeddings and src_vocab_size == tgt_vocab_size:
            self.output_projection.weight = self.src_embedding.weight

        self.to(device)

    def encode(
        self,
        src_ids: Int[Tensor, "batch_size src_seq_len"],
        src_positions: Int[Tensor, "batch_size src_seq_len"] | None = None,
        src_key_padding_mask: (
            Bool[Tensor, "batch src_len"] | None
        ) = None,  # PyTorch convention
    ) -> Float[Tensor, "batch_size src_seq_len d_model"]:
        """Embed and encode `src_ids`; positions default to 0..src_seq_len-1."""
        src_seq_len = src_ids.shape[-1]
        if src_positions is None:
            src_positions = (
                torch.arange(src_seq_len, device=src_ids.device)
                .unsqueeze(0)
                .expand_as(src_ids)
            )

        # Token embedding
        x = self.src_embedding(src_ids)

        # Encoder stack
        for layer in self.encoder_layers:
            x = layer(x, src_positions, src_key_padding_mask)

        # Final normalization
        x = self.encoder_norm(x)
        return x

    def decode(
        self,
        tgt_ids: Int[Tensor, "batch_size tgt_seq_len"],
        encoder_output: Float[Tensor, "batch_size src_seq_len d_model"],
        tgt_positions: Int[Tensor, "batch_size tgt_seq_len"] | None = None,
        tgt_key_padding_mask: (
            Bool[Tensor, "batch tgt_len"] | None
        ) = None,  # PyTorch convention
        src_key_padding_mask: (
            Bool[Tensor, "batch src_len"] | None
        ) = None,  # PyTorch convention
    ) -> Float[Tensor, "batch_size tgt_seq_len d_model"]:
        """Run the decoder stack over `tgt_ids`, cross-attending to `encoder_output`."""
        tgt_seq_len = tgt_ids.shape[-1]
        if tgt_positions is None:
            tgt_positions = (
                torch.arange(tgt_seq_len, device=tgt_ids.device)
                .unsqueeze(0)
                .expand_as(tgt_ids)
            )

        # Token embedding
        x = self.tgt_embedding(tgt_ids)

        # Decoder stack
        for layer in self.decoder_layers:
            x = layer(
                x,
                tgt_positions,
                encoder_output,
                src_key_padding_mask,
                tgt_key_padding_mask,
            )

        # Final normalization
        x = self.decoder_norm(x)
        return x

    def forward(
        self,
        src_ids: Int[Tensor, "batch_size src_seq_len"],
        tgt_ids: Int[Tensor, "batch_size tgt_seq_len"],
        src_positions: Int[Tensor, "batch_size src_seq_len"] | None = None,
        tgt_positions: Int[Tensor, "batch_size tgt_seq_len"] | None = None,
        src_key_padding_mask: (
            Bool[Tensor, "batch src_len"] | None
        ) = None,  # PyTorch convention
        tgt_key_padding_mask: (
            Bool[Tensor, "batch tgt_len"] | None
        ) = None,  # PyTorch convention
        encoder_hidden_states: Float[Tensor, "batch_size src_seq_len d_model"] | None = None,
        **kwargs,
    ) -> dict:
        """Encode (or reuse cached encoder states), decode, and project to logits."""
        # Encode the source sequence.
        # During generation, encoder_hidden_states may be cached and reused.
        if encoder_hidden_states is None:
            # Only encode when no cached encoder states were provided.
            if src_ids is None:
                raise ValueError("src_ids cannot be None when encoder_hidden_states is not provided.")
            encoder_output = self.encode(src_ids, src_positions, src_key_padding_mask)
        else:
            encoder_output = encoder_hidden_states

        # Decode the target sequence.
        decoder_output = self.decode(
            tgt_ids,
            encoder_output,
            tgt_positions,
            tgt_key_padding_mask,
            src_key_padding_mask,
        )

        # Project to target-vocabulary logits.
        logits = self.output_projection(decoder_output)

        # Return a dict for compatibility with the HF Trainer.
        return {
            "logits": logits,
            "encoder_outputs": encoder_output,
            "past_key_values": None, # KV caching not implemented yet
        }

    @torch.no_grad()
    def generate(
        self,
        src_ids: Int[Tensor, "batch_size src_seq_len"],
        max_length: int,
        bos_token_id: int,
        eos_token_id: int,
        pad_token_id: int,
        beam_size: int = 4,
        src_key_padding_mask: (
            Bool[Tensor, "batch src_len"] | None
        ) = None,  # PyTorch convention
        min_new_tokens: int = 0,
        length_penalty_alpha: float = 0.0,
    ) -> Int[Tensor, "batch_size max_length"]:
        """Beam-search decode; returns the best sequence per batch element.

        Beams are laid out flat as (bsz * beam_size, ...). Finished beams are
        forced to extend with `pad_token_id` at zero cost so they keep their
        score without distorting the search for live beams. The decoder is
        re-run on the full prefix every step (no KV cache).
        """
        self.eval()
        bsz = src_ids.shape[0]
        device = src_ids.device

        encoder_output = self.encode(src_ids, src_key_padding_mask=src_key_padding_mask)

        # Replicate encoder state once per beam.
        encoder_output = encoder_output.repeat_interleave(beam_size, dim=0)
        if src_key_padding_mask is not None:
            src_key_padding_mask = src_key_padding_mask.repeat_interleave(
                beam_size, dim=0
            )

        decoded_seqs = torch.full(
            (bsz * beam_size, max_length),
            pad_token_id,
            dtype=torch.long,
            device=device,
        )
        decoded_seqs[:, 0] = bos_token_id

        # Only beam 0 starts live; the duplicates get -1e9 so the first step
        # does not pick the same token from every identical beam.
        beam_scores = torch.zeros(bsz, beam_size, device=device)
        beam_scores[:, 1:] = -1e9
        beam_scores = beam_scores.view(-1)

        is_finished = torch.zeros(bsz * beam_size, dtype=torch.bool, device=device)

        # Per-beam generated length, used by the length penalty.
        generated_lengths = torch.ones(bsz * beam_size, dtype=torch.long, device=device)

        for cur_len in range(1, max_length):
            if is_finished.all():
                break

            tgt_ids = decoded_seqs[:, :cur_len]
            decoder_output = self.decode(
                tgt_ids, encoder_output, src_key_padding_mask=src_key_padding_mask
            )
            logits = self.output_projection(decoder_output[:, -1, :])
            log_probs = torch.nn.functional.log_softmax(
                logits, dim=-1
            )  # (bsz * beam_size, vocab_size)

            # Suppress EOS until enough tokens have been generated.
            # NOTE(review): cur_len includes the BOS token — confirm the
            # intended min_new_tokens semantics against callers.
            if cur_len < min_new_tokens:
                log_probs[:, eos_token_id] = -torch.inf

            finished_mask = is_finished.unsqueeze(1).expand_as(log_probs)
            log_probs[finished_mask] = -torch.inf
            # Finished sequences are forced to pick pad_token at log-prob 0,
            # so they keep their score and cannot crowd out live beams.
            log_probs[is_finished, pad_token_id] = 0.0

            scores = beam_scores.unsqueeze(1) + log_probs
            scores = scores.view(bsz, beam_size * self.tgt_vocab_size)

            next_beam_scores, next_beam_indices = torch.topk(
                scores, beam_size, dim=1, largest=True, sorted=True
            )

            # Flat topk index decomposes into (parent beam, token id).
            next_token_ids = next_beam_indices % self.tgt_vocab_size
            next_beam_ids = next_beam_indices // self.tgt_vocab_size

            batch_offsets = torch.arange(bsz, device=device) * beam_size
            base_beam_ids = next_beam_ids + batch_offsets.unsqueeze(1)

            # Only live beams grow this step.
            generated_lengths += (~is_finished).long()

            # Reorder all beam state by the selected parent beams.
            decoded_seqs = decoded_seqs[base_beam_ids.view(-1)]
            decoded_seqs[:, cur_len] = next_token_ids.view(-1)

            is_finished = is_finished[base_beam_ids.view(-1)]
            beam_scores = next_beam_scores.view(-1)
            generated_lengths = generated_lengths[base_beam_ids.view(-1)]

            # Mark beams that just emitted EOS as finished.
            is_finished = is_finished | (decoded_seqs[:, cur_len] == eos_token_id)

        # Apply the length penalty to the final beam scores.
        if length_penalty_alpha > 0:
            length_penalty = generated_lengths.float() ** length_penalty_alpha
            # Guard against division by zero.
            length_penalty = torch.where(length_penalty > 0, length_penalty, 1.0)
            final_scores = beam_scores / length_penalty
        else:
            final_scores = beam_scores

        final_beam_scores = final_scores.view(bsz, beam_size)
        best_beam_indices = torch.argmax(final_beam_scores, dim=1)

        batch_offsets = torch.arange(bsz, device=device) * beam_size
        best_seq_indices = best_beam_indices + batch_offsets

        best_sequences = decoded_seqs[best_seq_indices]

        return best_sequences
