"""
学生模型（蒸馏小模型）定义
包含模型架构、损失函数和训练逻辑
"""

import torch
import torch.nn as nn
from transformers import AutoConfig
from typing import Dict, Any

class StudentTransformer(nn.Module):
    """Compact decoder-style transformer used as the distillation student.

    Token + learned position embeddings feed a stack of pre-norm
    transformer layers, followed by a final LayerNorm and a linear
    projection back to vocabulary logits.
    """

    def __init__(self, 
                 vocab_size: int = 32000,
                 hidden_size: int = 512,
                 num_layers: int = 6,
                 num_attention_heads: int = 8,
                 max_position_embeddings: int = 2048):
        """
        Initialize the student model.
        Args:
            vocab_size: Vocabulary size.
            hidden_size: Hidden dimension of embeddings and layers.
            num_layers: Number of transformer layers.
            num_attention_heads: Number of attention heads per layer.
            max_position_embeddings: Maximum sequence length supported by
                the learned position-embedding table.
        """
        super().__init__()
        
        # Record the hyper-parameters on a GPT-2-shaped config object so
        # save_architecture() can read them back using GPT-2 field names.
        # NOTE(review): from_pretrained("gpt2") pulls the base config from
        # the HF hub/cache even though every relevant field is overridden
        # below — confirm offline environments have it cached.
        self.config = AutoConfig.from_pretrained(
            "gpt2",  # base GPT-2 configuration
            vocab_size=vocab_size,
            n_embd=hidden_size,
            n_layer=num_layers,
            n_head=num_attention_heads,
            n_positions=max_position_embeddings
        )
        
        # Model components. The HF config above is metadata only; the
        # actual modules are defined explicitly here.
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.position_embedding = nn.Embedding(max_position_embeddings, hidden_size)
        self.transformer_layers = nn.ModuleList([
            TransformerLayer(hidden_size, num_attention_heads)
            for _ in range(num_layers)
        ])
        self.layer_norm = nn.LayerNorm(hidden_size)
        self.output_projection = nn.Linear(hidden_size, vocab_size)

    def forward(self, input_ids: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Run the model on a batch of token ids.

        Args:
            input_ids: Long tensor of shape (batch, seq_len).

        Returns:
            Dict with "logits" of shape (batch, seq_len, vocab_size) and
            "hidden_states" of shape (batch, seq_len, hidden_size), the
            latter taken after the final LayerNorm.
        """
        # Position ids 0..seq_len-1, broadcast to the batch shape.
        position_ids = torch.arange(input_ids.size(1), device=input_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        
        # Sum token and position embeddings.
        embeddings = self.embedding(input_ids)
        position_embeddings = self.position_embedding(position_ids)
        hidden_states = embeddings + position_embeddings
        
        # Transformer stack.
        for layer in self.transformer_layers:
            hidden_states = layer(hidden_states)
        
        hidden_states = self.layer_norm(hidden_states)
        logits = self.output_projection(hidden_states)
        
        return {
            "logits": logits,
            "hidden_states": hidden_states
        }

    def save_architecture(self, output_dir: str = "训练示例/学生模型") -> None:
        """Write the architecture hyper-parameters to model_architecture.json."""
        import json
        import os
        
        os.makedirs(output_dir, exist_ok=True)
        
        # Translate GPT-2 config field names back to this class's names.
        config = {
            "vocab_size": self.config.vocab_size,
            "hidden_size": self.config.n_embd,
            "num_layers": self.config.n_layer,
            "num_attention_heads": self.config.n_head,
            "max_position_embeddings": self.config.n_positions
        }
        
        config_file = os.path.join(output_dir, "model_architecture.json")
        with open(config_file, "w", encoding="utf-8") as f:
            json.dump(config, f, indent=2, ensure_ascii=False)

    def save_initial_weights(self, output_dir: str = "训练示例/学生模型/initial_weights") -> None:
        """Save the freshly initialized state dict to initial_weights.pt."""
        import os
        # torch is already imported at module level; the redundant local
        # re-import has been removed.
        
        os.makedirs(output_dir, exist_ok=True)
        torch.save(self.state_dict(), os.path.join(output_dir, "initial_weights.pt"))

class TransformerLayer(nn.Module):
    """A single pre-norm transformer layer: self-attention + MLP, each
    with a residual connection."""

    def __init__(self, hidden_size: int, num_attention_heads: int):
        super().__init__()
        # batch_first=True so the layer consumes (batch, seq, hidden)
        # tensors as produced by the embedding layers. The previous
        # default (batch_first=False) interpreted dim 0 as the sequence
        # axis, silently attending across batch elements instead of
        # across positions.
        self.attention = nn.MultiheadAttention(
            hidden_size, num_attention_heads, batch_first=True
        )
        self.feed_forward = nn.Sequential(
            nn.Linear(hidden_size, hidden_size * 4),
            nn.GELU(),
            nn.Linear(hidden_size * 4, hidden_size)
        )
        self.layer_norm1 = nn.LayerNorm(hidden_size)
        self.layer_norm2 = nn.LayerNorm(hidden_size)
        
    def forward(self, x: torch.Tensor, attn_mask: torch.Tensor = None) -> torch.Tensor:
        """Apply the layer.

        Args:
            x: Input of shape (batch, seq, hidden).
            attn_mask: Optional mask forwarded to nn.MultiheadAttention
                (e.g. a causal mask). None keeps the original unmasked
                behavior, so existing callers are unaffected.

        Returns:
            Tensor of the same shape as `x`.
        """
        # Self-attention sub-layer (pre-norm, residual add).
        # need_weights=False skips averaging attention weights we never use.
        norm_x = self.layer_norm1(x)
        attention_output, _ = self.attention(
            norm_x, norm_x, norm_x, attn_mask=attn_mask, need_weights=False
        )
        x = x + attention_output
        
        # Feed-forward sub-layer (pre-norm, residual add).
        norm_x = self.layer_norm2(x)
        ff_output = self.feed_forward(norm_x)
        x = x + ff_output
        
        return x

class DistillationLoss(nn.Module):
    """Knowledge-distillation loss: alpha * soft (KL) + (1 - alpha) * hard (CE)."""

    def __init__(self, temperature: float = 2.0, alpha: float = 0.5):
        """
        Args:
            temperature: Softmax temperature for the soft targets.
            alpha: Weight of the soft (KL) term; the hard CE term is
                weighted by (1 - alpha).
        """
        super().__init__()
        self.temperature = temperature
        self.alpha = alpha
        self.kl_div = nn.KLDivLoss(reduction="batchmean")
        self.ce_loss = nn.CrossEntropyLoss()
        
    def forward(self, 
                student_logits: torch.Tensor, 
                teacher_logits: torch.Tensor,
                labels: torch.Tensor) -> torch.Tensor:
        """
        Compute the distillation loss.

        Accepts either classification logits of shape (batch, num_classes)
        with (batch,) labels, or sequence logits of shape
        (batch, seq, num_classes) with (batch, seq) labels; sequence
        inputs are flattened to one row per token.

        Args:
            student_logits: Student model output logits.
            teacher_logits: Teacher model output logits (same shape as
                the student's).
            labels: Ground-truth class indices.
        Returns:
            Scalar total loss.
        """
        num_classes = student_logits.size(-1)
        # Flatten to (tokens, classes): CrossEntropyLoss expects the class
        # dimension at dim 1 (3D sequence logits would otherwise be
        # misinterpreted), and "batchmean" KL should average over every
        # token row, not just the leading batch dimension.
        student_flat = student_logits.reshape(-1, num_classes)
        teacher_flat = teacher_logits.reshape(-1, num_classes)
        labels_flat = labels.reshape(-1)

        # Soft-target loss (KL divergence), scaled by T^2 to keep gradient
        # magnitudes comparable across temperatures.
        soft_loss = self.kl_div(
            torch.log_softmax(student_flat / self.temperature, dim=-1),
            torch.softmax(teacher_flat / self.temperature, dim=-1)
        ) * (self.temperature ** 2)
        
        # Hard-target loss (cross entropy against the true labels).
        hard_loss = self.ce_loss(student_flat, labels_flat)
        
        # Weighted combination.
        total_loss = self.alpha * soft_loss + (1 - self.alpha) * hard_loss
        return total_loss