import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm


class MOERouter(nn.Module):
    """Expert routing controller with Top-K selection.

    Given flattened token hidden states, this module:
      1. scores every expert for each token via a linear gate,
      2. sharpens the scores with a fixed temperature and softmax-normalizes,
      3. keeps the top-k experts per token and renormalizes their weights,
      4. builds a per-expert one-hot dispatch mask.

    Forward pipeline:
    hidden_states -> linear gate -> softmax -> topk -> renormalize -> mask
    """

    # Temperature applied to gate logits before softmax; smaller values
    # sharpen the routing distribution. Could be made a learnable parameter.
    TEMPERATURE = 0.1

    def __init__(self, hidden_dim, expert_number, top_k):
        super().__init__()
        self.gate = nn.Linear(hidden_dim, expert_number)  # routing decision layer
        self.expert_number = expert_number
        self.top_k = top_k

        # Initialization matching Transformer conventions.
        nn.init.normal_(self.gate.weight, std=0.02)
        nn.init.zeros_(self.gate.bias)

    def forward(self, hidden_states):
        """Route flattened tokens to experts.

        hidden_states: (T, hidden_dim) where T = batch * seq_len.

        Returns (router_logits, router_weights, selected_experts, expert_mask)
        with shapes (T, E), (T, k), (T, k), (E, T, k).
        """
        router_logits = self.gate(hidden_states)  # (T, E)
        scaled = router_logits / self.TEMPERATURE

        # Softmax in float32 for numerical stability.
        probs = F.softmax(scaled, dim=-1, dtype=torch.float32)

        # Never request more experts than exist.
        k = min(self.top_k, self.expert_number)
        topk_weights, topk_experts = torch.topk(probs, k, dim=-1)

        # Clamp away tiny values, renormalize so each token's selected
        # weights sum to 1, then cast back to the activation dtype.
        topk_weights = topk_weights.clamp(min=1e-4)
        topk_weights = (topk_weights / topk_weights.sum(dim=-1, keepdim=True)).to(hidden_states.dtype)

        # One-hot dispatch mask laid out expert-first: (E, T, k).
        dispatch_mask = F.one_hot(topk_experts, self.expert_number).permute(2, 0, 1)

        return router_logits, topk_weights, topk_experts, dispatch_mask

class BasicExpert(nn.Module):
    """Basic expert: a two-layer feed-forward network with GELU.

    Expands the input to 4x its width, applies GELU, and projects to the
    output dimension (the standard Transformer FFN shape).
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # Same layer sequence as before; explicit attributes instead of
        # nn.Sequential, which keeps the parameter-creation order identical.
        self.fc_in = nn.Linear(input_dim, 4 * input_dim)
        self.act = nn.GELU()
        self.fc_out = nn.Linear(4 * input_dim, output_dim)

    def forward(self, x):
        """Apply expand -> GELU -> project."""
        hidden = self.act(self.fc_in(x))
        return self.fc_out(hidden)

class SparseMOE(nn.Module):
    """Sparse Mixture-of-Experts layer with token-level routing.

    Core components:
      1. Expert pool: independent feed-forward networks, sparsely activated.
      2. Shared experts: applied to every token (generalist capacity).
      3. Router: dynamically assigns each token to its top-k experts.

    Forward pipeline:
    input -> flatten -> route -> per-expert compute on routed tokens only
          -> aggregate -> shared experts -> reshape -> scaled residual
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        # Expert pool (ModuleList guarantees parameter registration).
        self.experts = nn.ModuleList([
            BasicExpert(config.hidden_dim, config.hidden_dim)
            for _ in range(config.expert_number)
        ])

        # Shared experts used by every token.
        self.shared_experts = nn.ModuleList([
            BasicExpert(config.hidden_dim, config.hidden_dim)
            for _ in range(config.shared_experts_number)
        ])

        # Routing controller.
        self.router = MOERouter(
            config.hidden_dim,
            config.expert_number,
            config.top_k
        )

        # Learnable residual scaling coefficient.
        self.residual_factor = nn.Parameter(torch.tensor(0.1))

    def forward(self, x):
        """x: (batch, seq, hidden_dim) -> (output, router_logits).

        Fix: each expert previously ran on ALL tokens and unrouted outputs
        were zeroed out via a weight-0 multiply, wasting compute. Experts now
        run only on the tokens actually routed to them; the result is
        identical because unrouted tokens contributed exactly zero.
        """
        residual = x  # kept for the residual connection

        batch_size, seq_len, hidden_dim = x.shape
        hidden_states = x.view(-1, hidden_dim)  # (T, h), T = batch*seq

        # Routing decision.
        router_logits, router_weights, selected_experts, expert_mask = self.router(hidden_states)

        final_hidden = torch.zeros_like(hidden_states)

        for expert_idx, expert in enumerate(self.experts):
            mask = expert_mask[expert_idx].bool()  # (T, k)
            # Indices of tokens routed to this expert.
            token_idx = mask.any(dim=-1).nonzero(as_tuple=True)[0]
            if token_idx.numel() == 0:
                continue

            # Combined routing weight of each routed token for this expert.
            weights = (router_weights[token_idx] * mask[token_idx]).sum(dim=1)  # (n,)
            expert_output = expert(hidden_states[token_idx])  # (n, h)
            final_hidden.index_add_(0, token_idx, expert_output * weights.unsqueeze(-1))

        # Shared experts process every token; average their outputs.
        shared_output = sum(
            expert(hidden_states)
            for expert in self.shared_experts
        ) / len(self.shared_experts)
        final_hidden = final_hidden + shared_output

        # Restore shape and apply the scaled residual connection.
        # NOTE(review): the residual branch (not the MoE output) is scaled by
        # residual_factor, as in the original — confirm this is intended.
        final_hidden = final_hidden.view(batch_size, seq_len, hidden_dim)
        return residual * self.residual_factor + final_hidden, router_logits

def switch_load_balancing_loss(router_logits: torch.Tensor, num_experts: int, top_k: int) -> torch.Tensor:
    """Switch-Transformer style auxiliary load-balancing loss.

    Fix: the previous implementation misused ``F.kl_div`` — its ``target``
    (mean expert counts) summed to ``top_k`` rather than 1, the arguments were
    semantically reversed, and ``batchmean`` divided by the number of experts.
    This version computes the standard auxiliary loss
    ``E * sum_i(f_i * P_i) / top_k`` where ``f_i`` is the fraction of
    token-assignments dispatched to expert ``i`` and ``P_i`` is the mean
    router probability of expert ``i``. The minimum value 1.0 is attained at
    a perfectly uniform load.

    Args:
        router_logits: (T, num_experts) raw gate logits for T tokens.
        num_experts: size of the expert pool.
        top_k: experts selected per token.

    Returns:
        Scalar tensor >= ~1.0; larger means more imbalanced load.
    """
    # Routing probabilities (float32 for stability).
    routing_probs = F.softmax(router_logits, dim=-1, dtype=torch.float32)

    # Hard expert selection, respecting top_k <= num_experts.
    _, selected_experts = torch.topk(routing_probs, k=min(top_k, num_experts), dim=-1)

    # Per-token 0/1 assignment matrix: (T, E), each row sums to top_k.
    mask = F.one_hot(selected_experts, num_experts).sum(dim=1).float()

    tokens_per_expert = mask.mean(dim=0)          # f_i, sums to top_k
    prob_per_expert = routing_probs.mean(dim=0)   # P_i, sums to 1

    # Dot product penalizes experts that are both often chosen and assigned
    # high probability; normalized so a balanced router scores 1.0.
    aux_loss = num_experts * torch.sum(tokens_per_expert * prob_per_expert) / top_k
    return aux_loss

class MOEConfig:
    """Configuration for the MoE model.

    Generalized: every hyper-parameter is now a constructor argument with the
    previous hard-coded value as its default, so existing ``MOEConfig()``
    call sites behave exactly as before.
    """

    def __init__(self, hidden_dim=768, expert_number=8, top_k=2, shared_experts_number=2):
        # Width of token hidden states (also expert input/output dims).
        self.hidden_dim = hidden_dim
        # Number of routed (sparsely activated) experts.
        self.expert_number = expert_number
        # Experts selected per token by the router.
        self.top_k = top_k
        # Experts applied to every token regardless of routing.
        self.shared_experts_number = shared_experts_number

   

# --- Text processing ---
class TextDataset(Dataset):
    """Dataset for next-token language modeling over a JSON text corpus.

    Loads a JSON file of the form ``[{"text": ...}, ...]``, tokenizes each
    entry with the supplied tokenizer, and builds the vocabulary on the fly.
    ``__getitem__`` returns an (input_ids, target_ids) pair shifted by one
    position.

    Fix: the file is opened with an explicit UTF-8 encoding so loading does
    not depend on the platform's default locale encoding.
    """

    def __init__(self, file_path, tokenizer, max_length=128):
        self.data = []
        # Reserved ids: 0 = padding token, 1 = unknown token.
        # NOTE(review): the literal keys 'pad>'/'unk>' look like truncated
        # '<pad>'/'<unk>' markers, but they are kept as-is because other code
        # in this file looks them up by these exact strings.
        self.vocab = {'pad>': 0, 'unk>': 1}
        self.tokenizer = tokenizer

        # Load and tokenize the corpus.
        with open(file_path, 'r', encoding='utf-8') as f:
            raw_data = json.load(f)
        for item in raw_data:
            tokens = self.tokenizer(item['text'])
            self.data.append(tokens)

            # Grow the vocabulary with unseen tokens.
            for token in tokens:
                if token not in self.vocab:
                    self.vocab[token] = len(self.vocab)

        self.max_length = max_length
        self.vocab_size = len(self.vocab)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        tokens = self.data[idx]
        # Truncate long sequences; right-pad short ones to max_length.
        if len(tokens) > self.max_length:
            tokens = tokens[:self.max_length]
        else:
            tokens = tokens + ['pad>'] * (self.max_length - len(tokens))

        # Map tokens to ids; unknown tokens fall back to the 'unk>' id.
        input_ids = [self.vocab.get(t, self.vocab['unk>']) for t in tokens]
        # Shift by one: the model predicts token t+1 from tokens <= t.
        return torch.tensor(input_ids[:-1]), torch.tensor(input_ids[1:])

# --- Language model wrapper ---
class MOELanguageModel(nn.Module):
    """Complete language model with a MoE feed-forward block."""

    def __init__(self, config, vocab_size):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, config.hidden_dim)
        self.moe = SparseMOE(config)
        self.lm_head = nn.Linear(config.hidden_dim, vocab_size)

    def forward(self, input_ids):
        """input_ids: (batch, seq_len) -> (logits, router_logits).

        logits: (batch, seq, vocab_size); router_logits come from the MoE
        layer for the auxiliary load-balancing loss.
        """
        x = self.embedding(input_ids)      # (batch, seq, hidden)
        x, router_logits = self.moe(x)
        logits = self.lm_head(x)           # (batch, seq, vocab_size)
        return logits, router_logits

    def generate(self, prompt, tokenizer, max_length=50, temperature=0.7,
                 vocab=None, max_context=128):
        """Autoregressive sampling from the model.

        Fix: the original referenced ``self.dataset.vocab`` and ``self.vocab``,
        neither of which exists on this module (AttributeError at runtime).
        The vocabulary is now passed explicitly via ``vocab`` or derived from
        ``tokenizer.inverse_vocab`` when present. The prompt is encoded
        character by character, matching the char-level training tokenizer.

        Args:
            prompt: text to condition on.
            tokenizer: object exposing ``inverse_vocab`` (id -> token).
            max_length: maximum number of tokens to sample.
            temperature: softmax temperature for sampling.
            vocab: optional token -> id mapping; derived from the tokenizer
                when omitted.
            max_context: maximum context window fed to the model.
        """
        self.eval()
        inverse = getattr(tokenizer, 'inverse_vocab', {})
        if vocab is None:
            vocab = {tok: idx for idx, tok in inverse.items()}
        unk_id = vocab.get('unk>', 0)
        pad_id = vocab.get('pad>')
        device = next(self.parameters()).device

        with torch.no_grad():
            # Char-level encoding of the prompt (matches training).
            input_ids = [vocab.get(ch, unk_id) for ch in prompt]

            for _ in range(max_length):
                inputs = torch.tensor([input_ids[-max_context:]], device=device)
                logits, _ = self(inputs)
                next_logits = logits[0, -1, :] / temperature
                next_id = torch.multinomial(F.softmax(next_logits, dim=-1), num_samples=1).item()
                input_ids.append(next_id)

                # Stop when the model emits the padding token.
                if pad_id is not None and next_id == pad_id:
                    break

            return ''.join(str(inverse.get(i, '')) for i in input_ids)

# --- Training ---
def train_moe_model(
    data_path="/www/wwwroot/data/jupyter/aicode/rag/milvus/mytest/xijanfengbao.json",
    epochs=10,
    batch_size=8,
    lr=1e-4,
    aux_loss_weight=0.01,
    save_path="moe_lm.pth",
):
    """Train the MoE language model on a JSON text corpus.

    Fixes / improvements over the original:
      * the dataset path and hyper-parameters are arguments (the old
        hard-coded values are the defaults, so a bare call behaves the same);
      * the model and batches are moved to the available device — the
        original never did, so CUDA mixed precision could not engage;
      * AMP (autocast + GradScaler) is enabled only when CUDA is available,
        avoiding warnings/failures on CPU-only machines.
    """
    config = MOEConfig()

    def simple_tokenizer(text):
        return list(text)  # character-level tokenization

    dataset = TextDataset(data_path, tokenizer=simple_tokenizer, max_length=128)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = MOELanguageModel(config, vocab_size=dataset.vocab_size).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)

    # Mixed precision only makes sense on CUDA.
    use_amp = device.type == 'cuda'
    if hasattr(torch.amp, 'GradScaler'):
        scaler = torch.amp.GradScaler(enabled=use_amp)
    else:  # older torch releases
        scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    for epoch in range(epochs):
        progress = tqdm(loader, desc=f"Epoch {epoch+1}")
        for inputs, targets in progress:
            inputs = inputs.to(device)
            targets = targets.to(device)
            optimizer.zero_grad()

            with torch.amp.autocast(device_type=device.type, enabled=use_amp):
                logits, router_logits = model(inputs)
                # Padding positions are excluded from the LM loss.
                main_loss = F.cross_entropy(
                    logits.view(-1, dataset.vocab_size),
                    targets.view(-1),
                    ignore_index=dataset.vocab['pad>']
                )
                aux_loss = switch_load_balancing_loss(
                    router_logits,
                    config.expert_number,
                    config.top_k
                )
                total_loss = main_loss + aux_loss_weight * aux_loss

            # Backward with gradient scaling, then clip in unscaled space.
            scaler.scale(total_loss).backward()
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            scaler.step(optimizer)
            scaler.update()

            progress.set_postfix(loss=total_loss.item())

    # Persist weights together with the vocab and config needed to reload.
    torch.save({
        'model_state': model.state_dict(),
        'vocab': dataset.vocab,
        'config': config
    }, save_path)
    print("模型保存完成")

# --- Loading and inference ---
def load_and_generate(checkpoint_path="moe_lm.pth", prompt="人工智能", max_length=100):
    """Reload a saved checkpoint and sample text from it.

    Fixes: the checkpoint is loaded onto CPU explicitly, and
    ``weights_only=False`` is requested where supported because the checkpoint
    pickles a full ``MOEConfig`` instance (torch >= 2.6 defaults to
    weights-only loading, which would reject it). Older torch releases
    without the parameter fall back to a plain load.
    """
    try:
        checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False)
    except TypeError:  # torch releases without the weights_only parameter
        checkpoint = torch.load(checkpoint_path, map_location='cpu')

    class ReverseTokenizer:
        """Maps token ids back to token strings via an inverted vocab."""

        def __init__(self, vocab):
            self.inverse_vocab = {v: k for k, v in vocab.items()}

        def __call__(self, ids_list):
            return [self.inverse_vocab.get(i, 'unk>') for i in ids_list]

    # Rebuild the model from the stored config and weights.
    config = checkpoint['config']
    model = MOELanguageModel(config, vocab_size=len(checkpoint['vocab']))
    model.load_state_dict(checkpoint['model_state'])

    # Generation example.
    tokenizer = ReverseTokenizer(checkpoint['vocab'])
    generated = model.generate(prompt, tokenizer, max_length=max_length)
    print("生成结果:", generated)

# Script entry point: train the model, then reload the checkpoint and generate.
if __name__ == "__main__":
    train_moe_model()
    load_and_generate()