import torch
import math
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from einops import einsum, rearrange, reduce


class RotaryPositionalEmbedding(torch.nn.Module):
    def __init__(
        self,
        theta: float,
        d_k: int,
        max_seq_len: int,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        super().__init__()

        positions = torch.arange(max_seq_len, device=device).unsqueeze(1)
        freqs = torch.arange(0, d_k, 2, device=device) / d_k
        inv_freq = 1.0 / (theta**freqs)
        angles = positions * inv_freq

        self.register_buffer("cos", angles.cos().to(dtype), persistent=False)
        self.register_buffer("sin", angles.sin().to(dtype), persistent=False)

    def forward(self, x: torch.Tensor, token_positions: torch.Tensor) -> torch.Tensor:
        cos_pos = self.cos[token_positions]
        sin_pos = self.sin[token_positions]
        
        # 预分配输出张量
        x_rot = torch.empty_like(x)
        
        # 直接在原地计算，避免额外的rearrange操作
        x_rot[..., 0::2] = x[..., 0::2] * cos_pos - x[..., 1::2] * sin_pos
        x_rot[..., 1::2] = x[..., 0::2] * sin_pos + x[..., 1::2] * cos_pos
        
        return x_rot


class CausalMultiHeadSelfAttention(torch.nn.Module):
    """Multi-head self-attention with a causal mask, backed by
    ``F.scaled_dot_product_attention`` (dispatches to flash/efficient kernels
    when the backend supports them).

    Q, K and V come from one fused linear projection. Extra keyword arguments
    are accepted for configuration compatibility and ignored.
    """

    def __init__(self, d_model: int, num_heads: int, device=None, dtype=None, **kwargs):
        super().__init__()

        # Fused projection producing Q, K and V in a single matmul.
        self.wqkv = torch.nn.Linear(d_model, 3 * d_model, device=device, dtype=dtype)
        self.output_proj = torch.nn.Linear(d_model, d_model, device=device, dtype=dtype)

        self.num_heads = num_heads
        self.d_model = d_model
        self.d_head = d_model // num_heads

    def forward(
        self,
        x: torch.Tensor,
        rope: RotaryPositionalEmbedding | None = None,
        token_positions: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Attend causally over ``x`` of shape (batch, seq, d_model).

        When ``rope`` is given it is applied to queries and keys before the
        attention scores are formed; ``token_positions`` defaults to 0..seq-1.
        """
        batch, seq, _ = x.shape

        q, k, v = torch.chunk(self.wqkv(x), 3, dim=-1)

        def to_heads(t: torch.Tensor) -> torch.Tensor:
            # (batch, seq, d_model) -> (batch, heads, seq, d_head)
            return t.view(batch, seq, self.num_heads, self.d_head).transpose(1, 2)

        q, k, v = to_heads(q), to_heads(k), to_heads(v)

        if rope is not None:
            if token_positions is None:
                token_positions = torch.arange(seq, device=x.device)
            q = rope(q, token_positions)
            k = rope(k, token_positions)

        # Built-in fused attention with the causal mask applied internally.
        attended = F.scaled_dot_product_attention(q, k, v, is_causal=True)

        # (batch, heads, seq, d_head) -> (batch, seq, d_model)
        attended = attended.transpose(1, 2).reshape(batch, seq, self.d_model)
        return self.output_proj(attended)


class SwiGLU(torch.nn.Module):
    def __init__(
        self,
        d_model: int,
        d_ff: int,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ):
        super().__init__()

        self.w1 = torch.nn.Linear(d_model, d_ff, device=device, dtype=dtype)
        self.w2 = torch.nn.Linear(d_ff, d_model, device=device, dtype=dtype)
        self.w3 = torch.nn.Linear(d_model, d_ff, device=device, dtype=dtype)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        a1 = self.w1(x)
        silu = F.silu(a1)
        return self.w2(silu * self.w3(x))


class FlashAttentionTransformerBlock(torch.nn.Module):
    """Pre-norm transformer block: RMSNorm -> causal self-attention -> residual,
    then RMSNorm -> feed-forward -> residual.

    ``forward_with_checkpointing`` performs the same computation but trades
    compute for memory by recomputing each sub-layer during backward.
    """

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        d_ff: int,
        rope: RotaryPositionalEmbedding | None = None,
        device=None,
        dtype=None,
        **kwargs,
    ):
        super().__init__()

        self.rope = rope

        self.ln1 = torch.nn.RMSNorm(d_model, eps=1e-5, device=device, dtype=dtype)
        self.ln2 = torch.nn.RMSNorm(d_model, eps=1e-5, device=device, dtype=dtype)

        self.attn = CausalMultiHeadSelfAttention(
            d_model, num_heads, device, dtype, **kwargs
        )

        # Only the SwiGLU feed-forward variant is implemented.
        ffn_type = kwargs.get("ffn_type", "swiglu")
        if ffn_type != "swiglu":
            raise ValueError(f"Unsupported ffn_type: {ffn_type}")
        self.ffn = SwiGLU(d_model, d_ff, device, dtype)

    def forward(self, x: torch.Tensor):
        x = x + self.attn(self.ln1(x), self.rope)
        return x + self.ffn(self.ln2(x))

    def forward_with_checkpointing(self, x: torch.Tensor):
        """Same computation as ``forward``, with each sub-layer wrapped in
        ``torch.utils.checkpoint.checkpoint`` so its activations are recomputed
        in backward instead of being kept alive.

        The owning model may set ``checkpoint_reentrant`` on this module; it
        defaults to False, the mode recommended for PyTorch 2.x and required
        for torch.compile compatibility.
        """
        use_reentrant = getattr(self, "checkpoint_reentrant", False)

        x = x + checkpoint(
            lambda t: self.attn(self.ln1(t), self.rope), x, use_reentrant=use_reentrant
        )
        x = x + checkpoint(
            lambda t: self.ffn(self.ln2(t)), x, use_reentrant=use_reentrant
        )
        return x


class FlashAttentionTransformer(torch.nn.Module):
    """Decoder-only transformer language model: token embedding -> N pre-norm
    blocks -> final RMSNorm -> linear head over the vocabulary.

    Recognized ``**kwargs``: ``use_gradient_checkpointing`` (default True),
    ``checkpoint_reentrant`` (default False), ``weight_tying`` (default False),
    plus anything forwarded to the blocks (e.g. ``ffn_type``).
    """

    def __init__(
        self,
        d_model: int,
        num_heads: int,
        d_ff: int,
        vocab_size: int,
        context_length: int,
        num_layers: int,
        rope_theta: float = 10000.0,
        device=None,
        dtype=None,
        **kwargs,
    ):
        super().__init__()

        self.context_length = context_length
        self.token_embeddings = torch.nn.Embedding(
            vocab_size, d_model, device=device, dtype=dtype
        )
        self.use_gradient_checkpointing = kwargs.get("use_gradient_checkpointing", True)
        # Reentrant mode for torch.utils.checkpoint; False is the modern default.
        self.checkpoint_reentrant = kwargs.get("checkpoint_reentrant", False)

        if d_model % num_heads != 0:
            raise ValueError("d_model must be divisible by num_heads")

        # One RoPE table shared by every layer, sized per attention head.
        rope = RotaryPositionalEmbedding(
            rope_theta, d_model // num_heads, context_length, device=device, dtype=dtype
        )

        self.layers = torch.nn.ModuleList(
            FlashAttentionTransformerBlock(
                d_model, num_heads, d_ff, rope, device, dtype, **kwargs
            )
            for _ in range(num_layers)
        )

        # Propagate the checkpoint mode so blocks don't fall back to their default.
        for block in self.layers:
            block.checkpoint_reentrant = self.checkpoint_reentrant

        self.ln_final = torch.nn.RMSNorm(d_model, eps=1e-5, device=device, dtype=dtype)
        self.lm_head = torch.nn.Linear(d_model, vocab_size, device=device, dtype=dtype)

        # Share the embedding matrix with the output head when requested.
        if kwargs.get("weight_tying", False):
            self.lm_head.weight = self.token_embeddings.weight

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map token ids of shape (batch, seq) to logits (batch, seq, vocab).

        Raises ValueError when seq exceeds ``context_length``, since the RoPE
        tables only cover that many positions.
        """
        _, seq_len = x.shape
        if seq_len > self.context_length:
            raise ValueError(
                f"Input sequence length ({seq_len}) exceeds model context length ({self.context_length})"
            )

        h = self.token_embeddings(x)

        # Checkpoint only while training; in eval there is no backward pass
        # to save memory for.
        checkpointing = self.use_gradient_checkpointing and self.training
        for block in self.layers:
            h = block.forward_with_checkpointing(h) if checkpointing else block(h)

        return self.lm_head(self.ln_final(h))
