import json
import math
import os
from dataclasses import dataclass

import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import BertTokenizer

# ========== Configuration ==========
@dataclass
class DeepseekConfig:
    """Hyper-parameters for the MLA-based language model.

    Per-head q/k width is qk_nope_head_dim + qk_rope_head_dim; values use
    their own v_head_dim.
    """
    hidden_size: int = 768              # model (residual stream) width
    num_heads: int = 16                 # attention heads per layer
    vocab_size: int = 21128             # default matches bert-base-chinese; overridden from tokenizer at runtime
    max_position_embeddings: int = 512  # rotary cos/sin cache length
    rope_theta: float = 128000          # RoPE frequency base
    attention_dropout: float = 0.1      # dropout on attention probabilities
    q_lora_rank: int = 256              # low-rank bottleneck for the query projection
    qk_rope_head_dim: int = 64          # per-head q/k channels that receive rotary encoding
    kv_lora_rank: int = 512             # shared low-rank latent for keys/values
    v_head_dim: int = 128               # per-head value width
    qk_nope_head_dim: int = 64          # chosen so nope + rope dims sum to the total per-head q/k width
    attention_bias: bool = False        # NOTE(review): declared but not read by the visible code
    num_hidden_layers: int = 12         # number of stacked MLA blocks
    pad_token_id: int = 0               # ignored index for the LM loss
    intermediate_size: int = 3072       # feed-forward hidden width

# ========== Model architecture ==========
class DeepseekV2RMSNorm(nn.Module):
    """Root-mean-square layer norm: scale by 1/RMS, no mean-centering, no bias.

    The statistic is computed in float32 for stability and the result is cast
    back to the input dtype.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, x):
        input_dtype = x.dtype
        hidden = x.to(torch.float32)
        inv_rms = torch.rsqrt(hidden.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        normalized = hidden * inv_rms
        return (self.weight * normalized).to(input_dtype)

class DeepseekV2RotaryEmbedding(nn.Module):
    """Rotary position embedding with precomputed cos/sin caches.

    forward() returns (cos, sin), each shaped [1, 1, seq_len, dim], ready to
    broadcast against [batch, heads, seq_len, dim] q/k tensors.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000):
        super().__init__()
        self.dim = dim
        self.max_positions = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self._set_cos_sin_cache()

    def _set_cos_sin_cache(self):
        # BUGFIX: positions must share inv_freq's float dtype — the original
        # int64 arange fed into einsum with a float operand (mixed dtypes).
        t = torch.arange(self.max_positions, device=self.inv_freq.device,
                         dtype=self.inv_freq.dtype)
        freqs = torch.outer(t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        # BUGFIX: persistent=False, consistent with inv_freq — the caches are
        # derived values and should not bloat (or constrain) the state_dict.
        self.register_buffer("cos_cache", emb.cos(), persistent=False)
        self.register_buffer("sin_cache", emb.sin(), persistent=False)

    def forward(self, x, seq_len):
        """Return cos/sin caches for the first seq_len positions, in x's dtype."""
        # BUGFIX: fail loudly instead of silently returning a short slice that
        # would surface later as an opaque broadcast error.
        if seq_len > self.max_positions:
            raise ValueError(
                f"seq_len {seq_len} exceeds cached max_positions {self.max_positions}")
        return (
            self.cos_cache[:seq_len].to(x.dtype).unsqueeze(0).unsqueeze(0),
            self.sin_cache[:seq_len].to(x.dtype).unsqueeze(0).unsqueeze(0)
        )

def rotate_half(x):
    """Rotate the two halves of the last dim: (a, b) -> (-b, a)."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin):
    """Apply the rotary transform t*cos + rotate_half(t)*sin to q and k."""
    def _rotate(t):
        # (a, b) -> (-b, a) on the last dimension, then combine with cos/sin.
        x1, x2 = t.chunk(2, dim=-1)
        return t * cos + torch.cat((-x2, x1), dim=-1) * sin
    return _rotate(q), _rotate(k)

class MLA(nn.Module):
    """Multi-head Latent Attention block followed by a feed-forward network.

    Queries go through a low-rank (LoRA-style) bottleneck; keys/values are
    decoded from a shared low-rank latent. Each head's q/k is split into a
    rotary part (position-dependent) and a "nope" part (position-free); the
    rope part of K is shared across heads. Both residual connections are
    applied inside this block (no layer norm here — the final norm lives in
    the parent model).
    """

    def __init__(self, config: DeepseekConfig):
        super().__init__()
        self.config = config

        # Query projection: hidden -> q_lora_rank -> heads * (nope + rope).
        self.q_proj = nn.Sequential(
            nn.Linear(config.hidden_size, config.q_lora_rank),
            DeepseekV2RMSNorm(config.q_lora_rank),
            nn.Linear(config.q_lora_rank,
                     config.num_heads * (config.qk_nope_head_dim + config.qk_rope_head_dim))
        )

        # Key-value path: one linear emits the shared rope-K plus the KV latent;
        # a second decodes the latent into per-head nope-K and V.
        self.kv_linear1 = nn.Linear(config.hidden_size, config.kv_lora_rank + config.qk_rope_head_dim)
        self.kv_norm = DeepseekV2RMSNorm(config.kv_lora_rank)
        self.kv_linear2 = nn.Linear(config.kv_lora_rank,
                                  config.num_heads * (config.qk_nope_head_dim + config.v_head_dim))

        # Position-wise feed-forward network.
        self.mlp = nn.Sequential(
            nn.Linear(config.hidden_size, config.intermediate_size),
            nn.GELU(),
            nn.Linear(config.intermediate_size, config.hidden_size)
        )

        # Rotary embedding only spans the rope part of each head.
        self.rotary_emb = DeepseekV2RotaryEmbedding(
            config.qk_rope_head_dim,
            config.max_position_embeddings,
            config.rope_theta
        )

        # Output projection and attention-probability dropout.
        self.o_proj = nn.Linear(config.num_heads * config.v_head_dim, config.hidden_size)
        self.attn_dropout = nn.Dropout(config.attention_dropout)

    def forward(self, x, position_ids, attention_mask=None):
        """Run causal attention + MLP with residuals.

        Args:
            x: hidden states, [batch, seq_len, hidden_size].
            position_ids: accepted for interface compatibility; rotary phases
                are derived from sequence offsets internally, so it is unused.
            attention_mask: optional [batch, seq_len] padding mask, 1 = keep.
        """
        B, T, _ = x.shape
        cfg = self.config
        head_dim = cfg.qk_nope_head_dim + cfg.qk_rope_head_dim

        # Queries, split into non-rotary and rotary halves.
        q = self.q_proj(x).view(B, T, cfg.num_heads, -1)
        q_nope, q_rope = torch.split(q, [cfg.qk_nope_head_dim,
                                         cfg.qk_rope_head_dim], dim=-1)

        # Keys/values: shared rope-K + low-rank latent -> per-head nope-K and V.
        kv_combined = self.kv_linear1(x)
        k_rope, kv_lora = torch.split(kv_combined,
                                      [cfg.qk_rope_head_dim, cfg.kv_lora_rank],
                                      dim=-1)
        kv = self.kv_linear2(self.kv_norm(kv_lora)).view(B, T, cfg.num_heads, -1)
        k_nope, v = torch.split(kv, [cfg.qk_nope_head_dim, cfg.v_head_dim], dim=-1)

        # Broadcast the single shared rope-K to every head.
        k_rope = k_rope.view(B, T, 1, -1).expand(-1, -1, cfg.num_heads, -1)

        # Move to [B, num_heads, T, dim] for attention.
        q_nope, q_rope = q_nope.transpose(1, 2), q_rope.transpose(1, 2)
        k_nope, k_rope = k_nope.transpose(1, 2), k_rope.transpose(1, 2)
        v = v.transpose(1, 2)

        cos, sin = self.rotary_emb(x, seq_len=T)
        q_rope, k_rope = apply_rotary_pos_emb(q_rope, k_rope, cos, sin)

        # BUGFIX: score with the FULL q/k (nope + rope). The original scored
        # only the rope halves, silently discarding k_nope/q_nope (the
        # concatenated k it built was never used), and scaled by the rope dim
        # instead of the full per-head dim.
        q_full = torch.cat([q_nope, q_rope], dim=-1)
        k_full = torch.cat([k_nope, k_rope], dim=-1)
        attn_scores = (q_full @ k_full.transpose(-2, -1)) / math.sqrt(head_dim)

        # Use a large finite value rather than -inf so fully-masked rows
        # (e.g. all-pad) softmax to uniform instead of NaN.
        min_value = torch.finfo(attn_scores.dtype).min

        # BUGFIX: causal mask — without it every position attended to the
        # future, which leaks labels for an autoregressive LM.
        causal = torch.triu(torch.ones(T, T, dtype=torch.bool, device=x.device),
                            diagonal=1)
        attn_scores = attn_scores.masked_fill(causal, min_value)

        # BUGFIX: honor the padding mask, which was accepted but ignored.
        if attention_mask is not None:
            pad = (attention_mask == 0).view(B, 1, 1, T)
            attn_scores = attn_scores.masked_fill(pad, min_value)

        attn_probs = self.attn_dropout(F.softmax(attn_scores, dim=-1))

        # Weighted values back to [B, T, num_heads * v_head_dim], then project.
        attn_out = (attn_probs @ v).transpose(1, 2).reshape(B, T, -1)
        attn_out = self.o_proj(attn_out)

        # Residual connections around attention and the feed-forward net.
        x = x + attn_out
        x = x + self.mlp(x)
        return x

class DeepseekV2Model(nn.Module):
    """Decoder-only transformer: embedding, a stack of MLA blocks, final
    RMSNorm, and an LM head whose weight is tied to the input embedding.
    """

    def __init__(self, config: DeepseekConfig):
        super().__init__()
        self.config = config
        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList(
            MLA(config) for _ in range(config.num_hidden_layers))
        self.norm = DeepseekV2RMSNorm(config.hidden_size)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
        # Weight tying: the input embedding shares the unembedding matrix.
        self.embeddings.weight = self.lm_head.weight

    def forward(self, input_ids, attention_mask=None):
        """Return next-token logits of shape [batch, seq_len, vocab_size]."""
        batch, seq_len = input_ids.shape
        hidden = self.embeddings(input_ids)
        position_ids = torch.arange(
            seq_len, device=input_ids.device).expand(batch, -1)

        for block in self.layers:
            hidden = block(hidden, position_ids, attention_mask)

        return self.lm_head(self.norm(hidden))

# ========== Data loading ==========
class TextDataset(Dataset):
    """JSON-backed text dataset.

    Expects the file to contain a JSON array of records, each with a "text"
    field. Each item is tokenized to a fixed max_length with padding and
    truncation.
    """

    def __init__(self, file_path, tokenizer, max_length=512):
        # BUGFIX: explicit UTF-8 — the corpus is Chinese and the platform's
        # default encoding (e.g. on Windows) may not be UTF-8.
        with open(file_path, 'r', encoding='utf-8') as f:
            self.data = json.load(f)
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return {'input_ids', 'attention_mask'} tensors of shape [max_length]."""
        text = self.data[idx]['text']
        encoding = self.tokenizer(
            text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )
        # squeeze(0) (not squeeze()) so only the batch dim is dropped even in
        # degenerate shapes.
        return {
            'input_ids': encoding['input_ids'].squeeze(0),
            'attention_mask': encoding['attention_mask'].squeeze(0)
        }

# ========== Training loop ==========
def train_model():
    """Train the LM for 3 epochs with AMP, gradient accumulation and OneCycle LR.

    Side effects: reads the tokenizer and JSON corpus from hard-coded paths,
    trains on CUDA, and writes one checkpoint per epoch to
    seek/deepseek_model_{epoch}.pth.
    """
    tokenizer = BertTokenizer.from_pretrained("/www/wwwroot/data/jupyter/models/modelscope/hub/tiansz/bert-base-chinese")
    config = DeepseekConfig(
        vocab_size=tokenizer.vocab_size,
        pad_token_id=tokenizer.pad_token_id,
        qk_rope_head_dim=64,
        qk_nope_head_dim=64
    )

    model = DeepseekV2Model(config).cuda()
    dataset = TextDataset("/www/wwwroot/data/jupyter/aicode/rag/milvus/mytest/xijanfengbao2.json", tokenizer)
    dataloader = DataLoader(dataset, batch_size=16, shuffle=True,
                            num_workers=16, pin_memory=True, persistent_workers=True)

    num_epochs = 3
    gradient_accumulation_steps = 2
    grad_clip_value = 1.0

    optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5, weight_decay=0.01)
    # torch.amp.GradScaler matches the torch.amp.autocast API already used below
    # (torch.cuda.amp.GradScaler is deprecated).
    scaler = torch.amp.GradScaler('cuda')

    # BUGFIX: OneCycleLR must be sized in *optimizer* steps. The original used
    # len(dataloader) * 3 micro-batches, but scheduler.step() fires only once
    # per accumulation window, so the cycle never completed.
    steps_per_epoch = math.ceil(len(dataloader) / gradient_accumulation_steps)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=3e-5,
        total_steps=steps_per_epoch * num_epochs,
        pct_start=0.1  # 10% of steps for warmup
    )

    for epoch in range(num_epochs):
        model.train()
        progress_bar = tqdm(dataloader, desc=f"Epoch {epoch+1}")

        for step, batch in enumerate(progress_bar):
            inputs = batch['input_ids'].cuda(non_blocking=True)
            # BUGFIX: the padding mask was produced by the dataset but never
            # forwarded, so attention attended to pad positions.
            attention_mask = batch['attention_mask'].cuda(non_blocking=True)

            with torch.amp.autocast(device_type='cuda', dtype=torch.float16):
                logits = model(inputs, attention_mask)
                # BUGFIX: shift logits/labels for next-token prediction. The
                # original compared position t's logits with token t itself,
                # which trains an identity copy rather than a language model.
                shift_logits = logits[:, :-1, :].contiguous()
                shift_labels = inputs[:, 1:].contiguous()
                loss = F.cross_entropy(
                    shift_logits.view(-1, config.vocab_size),
                    shift_labels.view(-1),
                    ignore_index=config.pad_token_id
                )
                loss = loss / gradient_accumulation_steps  # gradient accumulation

            scaler.scale(loss).backward()

            # Step once per accumulation window; also flush the leftover
            # partial window at the end of the epoch so no gradient is dropped.
            if (step + 1) % gradient_accumulation_steps == 0 or (step + 1) == len(dataloader):
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_value)
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad(set_to_none=True)
                scheduler.step()

            progress_bar.set_postfix(
                loss=f"{loss.item()*gradient_accumulation_steps:.4f}",
                lr=f"{scheduler.get_last_lr()[0]:.2e}"
            )

        # One checkpoint per epoch; make sure the target directory exists.
        os.makedirs('seek', exist_ok=True)
        torch.save(model.state_dict(), f'seek/deepseek_model_{epoch}.pth')

# ========== Generation ==========
def generate(prompt, max_length=512):
    """Greedy-decode a continuation of `prompt` from the epoch-1 checkpoint.

    Stops early on the tokenizer's [SEP] token and never exceeds the model's
    position budget. Returns the decoded string (special tokens stripped).
    """
    tokenizer = BertTokenizer.from_pretrained("/www/wwwroot/data/jupyter/models/modelscope/hub/tiansz/bert-base-chinese")
    config = DeepseekConfig(
        vocab_size=tokenizer.vocab_size,
        pad_token_id=tokenizer.pad_token_id,
        qk_rope_head_dim=64,
        qk_nope_head_dim=64
    )
    model = DeepseekV2Model(config).cuda()
    model.load_state_dict(torch.load("seek/deepseek_model_1.pth"))
    model.eval()

    input_ids = tokenizer.encode(prompt, return_tensors="pt").cuda()

    # Single no-grad scope around the whole loop (the original re-entered it
    # every iteration).
    with torch.no_grad():
        for _ in range(max_length):
            # BUGFIX: stop before exceeding max_position_embeddings — the
            # rotary cache cannot serve longer sequences.
            if input_ids.size(1) >= config.max_position_embeddings:
                break
            logits = model(input_ids)
            next_token = logits[:, -1, :].argmax(-1)
            input_ids = torch.cat([input_ids, next_token.unsqueeze(-1)], dim=1)
            # BUGFIX: honor end-of-sequence instead of always emitting
            # max_length tokens.
            if tokenizer.sep_token_id is not None and next_token.item() == tokenizer.sep_token_id:
                break

    return tokenizer.decode(input_ids[0], skip_special_tokens=True)

if __name__ == "__main__":
    # Train for 3 epochs (writes per-epoch checkpoints under seek/), then
    # print a greedy continuation of the sample prompt.
    train_model()
    print(generate("王铮竟然"))