# -*- coding: utf-8 -*-
"""
索信达-预训练模型
Transformer模型定义 - 实现完整的Transformer架构
包含多头注意力、位置编码、前馈网络、层归一化等核心组件
"""

import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple

from config import config


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    Adds deterministic position information to token embeddings so the
    (position-agnostic) attention layers can distinguish token order.
    """

    def __init__(self, d_model: int, max_len: int = 5000):
        """
        Args:
            d_model: embedding dimension (even or odd).
            max_len: maximum sequence length the table supports.
        """
        super().__init__()
        self.d_model = d_model

        # Precompute the full table once; it is input-independent.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)

        # Frequencies 1 / 10000^(2i/d_model), computed in log space for
        # numerical stability.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             (-math.log(10000.0) / d_model))

        # Even columns get sin, odd columns get cos.  Slicing div_term to
        # d_model // 2 entries fixes a crash for odd d_model (the cos half
        # has one fewer column than div_term); for even d_model the slice
        # is the whole tensor, so behavior is unchanged.
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term[:d_model // 2])

        # Shape [max_len, 1, d_model] so it broadcasts across the batch dim;
        # registered as a buffer so it follows .to(device) but is not trained.
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Add positional encodings to *x*.

        Args:
            x: input tensor [seq_len, batch_size, d_model].
        Returns:
            Tensor of the same shape with positional encodings added.
        """
        x = x + self.pe[:x.size(0), :]
        return x


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention."""

    def __init__(self, d_model: int, n_heads: int, dropout: float = 0.1):
        """
        Args:
            d_model: model dimension; must be divisible by n_heads.
            n_heads: number of attention heads.
            dropout: dropout probability applied to the attention weights.
        """
        super().__init__()
        assert d_model % n_heads == 0

        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_model // n_heads  # per-head dimension

        # Query / key / value projections and the output projection.
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)

        self.dropout = nn.Dropout(dropout)

    def scaled_dot_product_attention(self, Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor,
                                   mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Scaled dot-product attention.

        Args:
            Q: queries [batch, heads, q_len, d_k].
            K: keys    [batch, heads, k_len, d_k].
            V: values  [batch, heads, k_len, d_k].
            mask: optional tensor broadcastable to [batch, heads, q_len, k_len];
                positions where mask == 0 are excluded from attention.
        Returns:
            (output [batch, heads, q_len, d_k], attention weights).
        """
        # Scale by sqrt(d_k) to keep the softmax logits in a stable range.
        scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.d_k)

        # Masked positions get a large negative logit so softmax gives ~0.
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)

        attention_weights = F.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)

        output = torch.matmul(attention_weights, V)

        return output, attention_weights

    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
                mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Args:
            query: [batch_size, q_len, d_model]
            key, value: [batch_size, k_len, d_model] (k_len may differ from q_len)
            mask: optional mask of rank 2 ([q_len, k_len]), 3 ([batch, q_len, k_len])
                or 4 ([batch_or_1, 1, q_len, k_len]).
        Returns:
            Attention output [batch_size, q_len, d_model].
        """
        batch_size = query.size(0)
        q_len, k_len = query.size(1), key.size(1)

        # Project and split into heads.  Fix: K and V must be reshaped with
        # the key/value sequence length, which can differ from the query
        # length in cross-attention (the old code used q_len for all three).
        Q = self.w_q(query).view(batch_size, q_len, self.n_heads, self.d_k).transpose(1, 2)
        K = self.w_k(key).view(batch_size, k_len, self.n_heads, self.d_k).transpose(1, 2)
        V = self.w_v(value).view(batch_size, k_len, self.n_heads, self.d_k).transpose(1, 2)

        # Normalize the mask to rank 4 and let it broadcast over the head
        # dimension.  Fix: the old unsqueeze(1).repeat(1, n_heads, 1, 1)
        # raised a RuntimeError whenever a 4-D mask was passed in.
        if mask is not None:
            if mask.dim() == 2:          # [q_len, k_len]
                mask = mask.unsqueeze(0).unsqueeze(0)
            elif mask.dim() == 3:        # [batch, q_len, k_len]
                mask = mask.unsqueeze(1)
            # rank-4 masks broadcast as-is.

        attention_output, _ = self.scaled_dot_product_attention(Q, K, V, mask)

        # Merge heads back: [batch, heads, q_len, d_k] -> [batch, q_len, d_model].
        attention_output = attention_output.transpose(1, 2).contiguous().view(
            batch_size, q_len, self.d_model
        )

        # Final output projection.
        output = self.w_o(attention_output)

        return output


class FeedForward(nn.Module):
    """Position-wise feed-forward network: linear -> activation -> dropout -> linear."""

    def __init__(self, d_model: int, d_ff: int, dropout: float = 0.1, activation: str = "relu"):
        """
        Args:
            d_model: input/output dimension.
            d_ff: hidden (expansion) dimension.
            dropout: dropout probability after the activation.
            activation: one of "relu", "gelu", "swish".
        Raises:
            ValueError: if *activation* is not a supported name.
        """
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ff)
        self.linear2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

        # Resolve the activation via a dispatch table instead of an if/elif chain.
        activations = {
            "relu": F.relu,
            "gelu": F.gelu,
            "swish": lambda t: t * torch.sigmoid(t),
        }
        chosen = activations.get(activation)
        if chosen is None:
            raise ValueError(f"不支持的激活函数: {activation}")
        self.activation = chosen

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the two-layer MLP to *x* [batch_size, seq_len, d_model]."""
        hidden = self.dropout(self.activation(self.linear1(x)))
        return self.linear2(hidden)


class TransformerBlock(nn.Module):
    """One Transformer layer: multi-head self-attention followed by a
    position-wise feed-forward network, each sub-layer wrapped in a
    residual connection with post-LayerNorm."""

    def __init__(self, d_model: int, n_heads: int, d_ff: int, dropout: float = 0.1, activation: str = "relu"):
        """
        Args:
            d_model: model dimension.
            n_heads: number of attention heads.
            d_ff: feed-forward hidden dimension.
            dropout: dropout probability for both sub-layers.
            activation: feed-forward activation name.
        """
        super().__init__()

        # The two sub-layers.
        self.attention = MultiHeadAttention(d_model, n_heads, dropout)
        self.feed_forward = FeedForward(d_model, d_ff, dropout, activation)

        # Post-norm: normalisation is applied after each residual add.
        self.norm1 = nn.LayerNorm(d_model, eps=config.model.layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=config.model.layer_norm_eps)

        # Residual dropout shared by both sub-layers.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Run one layer over *x* [batch_size, seq_len, d_model] with an
        optional attention mask; returns a tensor of the same shape."""
        # Sub-layer 1: self-attention -> dropout -> residual -> norm.
        attended = self.attention(x, x, x, mask)
        x = self.norm1(x + self.dropout(attended))

        # Sub-layer 2: feed-forward -> dropout -> residual -> norm.
        transformed = self.feed_forward(x)
        return self.norm2(x + self.dropout(transformed))


class TransformerLanguageModel(nn.Module):
    """Decoder-only Transformer language model with causal self-attention."""

    def __init__(self, vocab_size: int = None, d_model: int = None, n_heads: int = None,
                 n_layers: int = None, d_ff: int = None, max_seq_length: int = None,
                 dropout: float = None, activation: str = None):
        """Build the model; every argument defaults to the global ``config``.

        Note: explicit ``is not None`` checks are used so legitimate falsy
        values (e.g. ``dropout=0.0``) are honoured — the previous
        ``value or default`` idiom silently replaced them with the config.
        """
        super().__init__()

        # Fall back to config only when an argument was genuinely omitted.
        self.vocab_size = vocab_size if vocab_size is not None else config.model.vocab_size
        self.d_model = d_model if d_model is not None else config.model.d_model
        self.n_heads = n_heads if n_heads is not None else config.model.n_heads
        self.n_layers = n_layers if n_layers is not None else config.model.n_layers
        self.d_ff = d_ff if d_ff is not None else config.model.d_ff
        self.max_seq_length = max_seq_length if max_seq_length is not None else config.model.max_seq_length
        self.dropout = dropout if dropout is not None else config.model.dropout
        self.activation = activation if activation is not None else config.model.activation

        # Token embedding table.
        self.embedding = nn.Embedding(self.vocab_size, self.d_model)

        # Optional sinusoidal positional encoding.
        if config.model.use_positional_encoding:
            self.positional_encoding = PositionalEncoding(self.d_model, self.max_seq_length)
        else:
            self.positional_encoding = None

        # The stack of Transformer layers.
        self.transformer_blocks = nn.ModuleList([
            TransformerBlock(self.d_model, self.n_heads, self.d_ff, self.dropout, self.activation)
            for _ in range(self.n_layers)
        ])

        # Final normalisation and projection back to the vocabulary.
        self.layer_norm = nn.LayerNorm(self.d_model, eps=config.model.layer_norm_eps)
        self.output_projection = nn.Linear(self.d_model, self.vocab_size)

        # Dropout applied after embedding + positional encoding.
        self.dropout_layer = nn.Dropout(self.dropout)

        # Recursively initialise all sub-module weights.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialise weights: N(0, initializer_range) for Linear/Embedding,
        identity (weight=1, bias=0) for LayerNorm."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=config.model.initializer_range)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=config.model.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.zeros_(module.bias)
            torch.nn.init.ones_(module.weight)

    def create_causal_mask(self, seq_len: int, device: torch.device) -> torch.Tensor:
        """Create a causal (lower-triangular) mask [1, 1, seq_len, seq_len]."""
        mask = torch.tril(torch.ones(seq_len, seq_len, device=device))
        return mask.unsqueeze(0).unsqueeze(0)  # [1, 1, seq_len, seq_len]

    def create_padding_mask(self, input_ids: torch.Tensor, pad_token_id: int = 0) -> torch.Tensor:
        """Create a padding mask [batch, 1, 1, seq_len]: 1 for real tokens,
        0 for padding (tokens equal to *pad_token_id*)."""
        padding_mask = (input_ids != pad_token_id).float()
        return padding_mask.unsqueeze(1).unsqueeze(1)

    def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None,
                labels: Optional[torch.Tensor] = None) -> dict:
        """
        Args:
            input_ids: token ids [batch_size, seq_len].
            attention_mask: optional [batch_size, seq_len]; 1 = attend, 0 = padding.
            labels: optional targets [batch_size, seq_len] for the LM loss.
        Returns:
            dict with 'logits' [batch_size, seq_len, vocab_size] and
            'loss' (None when *labels* is not given).
        """
        batch_size, seq_len = input_ids.size()
        device = input_ids.device

        # Embed and scale by sqrt(d_model), as in the original Transformer.
        x = self.embedding(input_ids) * math.sqrt(self.d_model)

        # Positional encoding operates on [seq_len, batch, d_model].
        if self.positional_encoding is not None:
            x = self.positional_encoding(x.transpose(0, 1)).transpose(0, 1)

        x = self.dropout_layer(x)

        # Build a rank-3 [batch_or_1, seq_len, seq_len] mask; each attention
        # layer expands it over its head dimension itself.
        causal_mask = self.create_causal_mask(seq_len, device).squeeze(0)  # [1, seq, seq]

        if attention_mask is not None:
            # Fix: honour the caller-supplied attention_mask.  The old code
            # ignored it and recomputed a padding mask from a hard-coded
            # pad id of 0 via create_padding_mask.
            padding_mask = attention_mask.float().unsqueeze(1)   # [batch, 1, seq]
            combined_mask = causal_mask * padding_mask           # [batch, seq, seq]
        else:
            combined_mask = causal_mask

        for transformer_block in self.transformer_blocks:
            x = transformer_block(x, combined_mask)

        x = self.layer_norm(x)

        # Project to vocabulary logits.
        logits = self.output_projection(x)

        loss = None
        if labels is not None:
            # Standard next-token shift: position t predicts token t+1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()

            # -100 labels are ignored (HuggingFace-style convention).
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        return {
            'logits': logits,
            'loss': loss
        }

    def generate(self, input_ids: torch.Tensor, max_length: int = 100,
                temperature: float = 1.0, top_k: int = 50, top_p: float = 0.9) -> torch.Tensor:
        """
        Autoregressive sampling.
        Args:
            input_ids: prompt token ids [batch_size, seq_len].
            max_length: maximum number of new tokens to generate.
            temperature: softmax temperature (> 0); higher = more random.
            top_k: keep only the k most likely tokens (0 disables).
            top_p: nucleus sampling threshold (1.0 disables).
        Returns:
            input_ids with the sampled tokens appended.
        """
        self.eval()

        with torch.no_grad():
            for _ in range(max_length):
                # Fix: feed at most max_seq_length tokens so the positional
                # encoding table is never overrun on long generations.
                context = input_ids[:, -self.max_seq_length:]
                outputs = self.forward(context)
                logits = outputs['logits']

                # Only the last position's distribution matters.
                next_token_logits = logits[:, -1, :] / temperature

                # Top-k: keep the k largest logits, mask out the rest.
                # Fix: clamp k so torch.topk never exceeds the vocab size.
                if top_k > 0:
                    k = min(top_k, next_token_logits.size(-1))
                    top_k_logits, top_k_indices = torch.topk(next_token_logits, k)
                    next_token_logits = torch.full_like(next_token_logits, float('-inf'))
                    next_token_logits.scatter_(1, top_k_indices, top_k_logits)

                # Top-p (nucleus): drop tokens past cumulative probability top_p.
                if top_p < 1.0:
                    sorted_logits, sorted_indices = torch.sort(next_token_logits, descending=True)
                    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

                    sorted_indices_to_remove = cumulative_probs > top_p
                    # Shift right so the first token crossing the threshold is kept.
                    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                    sorted_indices_to_remove[..., 0] = 0

                    indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                    next_token_logits[indices_to_remove] = float('-inf')

                # Sample the next token and append it.
                probs = F.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)
                input_ids = torch.cat([input_ids, next_token], dim=-1)

                # Fix: .item() crashes for batch_size > 1, so only early-stop
                # on EOS when generating a single sequence.
                if input_ids.size(0) == 1 and next_token.item() == config.data.eos_token:
                    break

        return input_ids

    def get_model_size(self) -> int:
        """Return the total number of parameters."""
        return sum(p.numel() for p in self.parameters())

    def get_model_info(self) -> dict:
        """Return a dict summarising the model hyperparameters and size."""
        return {
            'vocab_size': self.vocab_size,
            'd_model': self.d_model,
            'n_heads': self.n_heads,
            'n_layers': self.n_layers,
            'd_ff': self.d_ff,
            'max_seq_length': self.max_seq_length,
            'dropout': self.dropout,
            'activation': self.activation,
            'total_parameters': self.get_model_size()
        }


def _run_smoke_test() -> None:
    """Build the model with config defaults, run one forward pass, then
    sample a short continuation — a quick end-to-end sanity check."""
    print("测试Transformer语言模型...")

    # Instantiate with the defaults from config.
    model = TransformerLanguageModel()
    print(f"模型创建成功!")
    print(f"模型参数数量: {model.get_model_size():,}")

    # Dump the hyperparameters the model was built with.
    model_info = model.get_model_info()
    print("\n模型配置:")
    for key, value in model_info.items():
        print(f"  {key}: {value}")

    # A tiny random batch for the forward pass.
    batch_size, seq_len = 2, 10
    input_ids = torch.randint(0, config.model.vocab_size, (batch_size, seq_len))
    attention_mask = torch.ones(batch_size, seq_len)
    labels = torch.randint(0, config.model.vocab_size, (batch_size, seq_len))

    print(f"\n测试前向传播...")
    print(f"输入形状: {input_ids.shape}")

    outputs = model(input_ids, attention_mask, labels)
    print(f"输出logits形状: {outputs['logits'].shape}")
    print(f"损失: {outputs['loss'].item():.4f}")

    # Generate a few tokens from the first sequence of the batch.
    print(f"\n测试文本生成...")
    generated = model.generate(input_ids[:1], max_length=5)
    print(f"生成序列形状: {generated.shape}")

    print("模型测试完成!")


if __name__ == "__main__":
    _run_smoke_test()