import json
import os
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import BertTokenizer

# Silence library warnings (e.g. AMP/deprecation chatter) for cleaner logs.
warnings.filterwarnings("ignore")

# Single global device used by training and generation below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Allow TF32 matmuls on Ampere+ GPUs: faster, with acceptable precision loss.
torch.backends.cuda.matmul.allow_tf32 = True

class MOEConfig:
    """Hyperparameters for the MoE language model and its training data."""

    def __init__(self):
        # Transformer width and depth.
        self.hidden_dim = 768
        self.num_layers = 6
        # Routed experts: each token is dispatched to `top_k` of `expert_number`.
        self.expert_number = 8
        self.top_k = 2
        # Experts applied to every token regardless of routing.
        self.shared_experts = 2
        # Tokenization / batching.
        self.max_seq_len = 512
        self.batch_size = 16

class MOERouter(nn.Module):
    """Top-k softmax gate that assigns each token to a subset of experts."""

    def __init__(self, hidden_dim, num_experts, top_k):
        super().__init__()
        self.num_experts = num_experts
        self.top_k = top_k
        # Linear gate with a bias so the router can learn per-expert offsets.
        self.gate = nn.Linear(hidden_dim, num_experts, bias=True)
        nn.init.xavier_normal_(self.gate.weight)
        nn.init.zeros_(self.gate.bias)

    def forward(self, hidden_states):
        """Route flattened tokens of shape (tokens, hidden_dim).

        Returns:
            router_logits: (tokens, num_experts) raw gate scores.
            topk_weights:  (tokens, top_k) renormalized routing weights.
            topk_indices:  (tokens, top_k) selected expert ids.
            expert_mask:   (num_experts, tokens, top_k) one-hot selection mask.
        """
        router_logits = self.gate(hidden_states)
        routing_probs = F.softmax(router_logits, dim=-1)

        topk_weights, topk_indices = torch.topk(routing_probs, self.top_k, dim=-1)
        # Renormalize the kept weights so they sum to one per token; the small
        # epsilon guards against an all-zero row.
        denom = topk_weights.sum(dim=-1, keepdim=True) + 1e-6
        topk_weights = topk_weights / denom

        # (tokens, top_k, experts) -> (experts, tokens, top_k)
        expert_mask = F.one_hot(topk_indices, self.num_experts).permute(2, 0, 1)
        return router_logits, topk_weights, topk_indices, expert_mask

class BertExpert(nn.Module):
    """Position-wise feed-forward expert: Linear -> GELU -> Linear -> Dropout."""

    def __init__(self, hidden_dim):
        super().__init__()
        expansion = 4 * hidden_dim
        fc_in = nn.Linear(hidden_dim, expansion)
        act = nn.GELU()
        fc_out = nn.Linear(expansion, hidden_dim)
        drop = nn.Dropout(0.1)
        nn.init.xavier_normal_(fc_in.weight)
        nn.init.xavier_normal_(fc_out.weight)
        # Same module order/indices as a plain Sequential literal, so the
        # state-dict keys stay net.0.* and net.2.*.
        self.net = nn.Sequential(fc_in, act, fc_out, drop)

    def forward(self, x):
        """Apply the feed-forward stack; output has the same shape as x."""
        return self.net(x)

class SparseMOE(nn.Module):
    """MoE block: routed experts plus always-on shared experts, with a
    learned-scale residual connection and a final LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.experts = nn.ModuleList(
            BertExpert(config.hidden_dim) for _ in range(config.expert_number)
        )
        self.shared_experts = nn.ModuleList(
            BertExpert(config.hidden_dim) for _ in range(config.shared_experts)
        )
        self.router = MOERouter(config.hidden_dim, config.expert_number, config.top_k)
        # Learned scale on the residual branch.
        self.residual_factor = nn.Parameter(torch.tensor(1.0))
        self.ln = nn.LayerNorm(config.hidden_dim)

    def forward(self, x):
        """Return (output, router_logits) for x of shape (batch, seq, hidden)."""
        residual = x
        batch_size, seq_len, hidden_dim = x.shape

        tokens = x.view(-1, hidden_dim)
        router_logits, weights, indices, mask = self.router(tokens)

        # Dense dispatch: every expert processes every token; the one-hot mask
        # zeroes the weights of unselected experts, so the einsum mixes only
        # each token's top-k expert outputs.
        expert_outputs = torch.stack([expert(tokens) for expert in self.experts])
        combined = torch.einsum('ebk,ebh->bh', mask * weights.unsqueeze(0), expert_outputs)
        combined = combined.view(batch_size, seq_len, hidden_dim)

        # Shared experts see the raw input and are simply averaged.
        shared = sum(expert(residual) for expert in self.shared_experts)
        shared = shared / len(self.shared_experts)

        output = self.ln(combined + shared)
        return residual * self.residual_factor + output, router_logits

class MOELanguageModel(nn.Module):
    """Embedding -> stacked SparseMOE layers -> LayerNorm -> LM head."""

    def __init__(self, config, vocab_size):
        super().__init__()
        self.embeddings = nn.Embedding(vocab_size, config.hidden_dim)
        self.moe_layers = nn.ModuleList(
            SparseMOE(config) for _ in range(config.num_layers)
        )
        self.final_ln = nn.LayerNorm(config.hidden_dim)
        self.lm_head = nn.Linear(config.hidden_dim, vocab_size)
        nn.init.xavier_normal_(self.lm_head.weight)

    def forward(self, input_ids):
        """Return (vocab logits, per-layer router logits stacked on dim 0)."""
        hidden = self.embeddings(input_ids)
        all_router_logits = []
        for layer in self.moe_layers:
            hidden, layer_logits = layer(hidden)
            all_router_logits.append(layer_logits)
        logits = self.lm_head(self.final_ln(hidden))
        return logits, torch.stack(all_router_logits)

class TextDataset(Dataset):
    """Dataset over a JSON file of records shaped like [{"text": ...}, ...].

    Every record is tokenized eagerly at construction time so __getitem__
    is a cheap dictionary lookup.
    """

    def __init__(self, file_path, tokenizer, max_length=512):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.data = []

        # Explicit UTF-8: the corpus contains non-ASCII (Chinese) text and the
        # platform default encoding is not guaranteed to handle it.
        with open(file_path, 'r', encoding='utf-8') as f:
            records = json.load(f)

        for item in records:
            encoding = tokenizer(
                item['text'],
                max_length=max_length,
                padding='max_length',
                truncation=True,
                return_tensors='pt'
            )
            self.data.append(encoding)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Tokenizer returns (1, max_length) tensors; drop only the batch dim
        # (squeeze(0), not squeeze(), so a length-1 sequence keeps its rank).
        item = self.data[idx]
        return {
            'input_ids': item['input_ids'].squeeze(0),
            'attention_mask': item['attention_mask'].squeeze(0)
        }

def load_balancing_loss(router_logits, num_experts, top_k):
    """Auxiliary loss pushing expert-selection frequencies toward uniform.

    Args:
        router_logits: iterable of per-layer gate logits, each (..., num_experts).
        num_experts: total number of routed experts.
        top_k: number of experts selected per token.

    Returns:
        Scalar tensor: KL divergence between the uniform distribution and the
        smoothed empirical selection frequencies (zero when perfectly balanced).
    """
    probs = torch.stack([F.softmax(logits, dim=-1) for logits in router_logits])
    # Allocate counts on the logits' own device rather than a module-level
    # global, so the loss works wherever the model actually lives.
    expert_counts = torch.zeros(num_experts, device=probs.device)

    # Count, per expert, how often it lands in any token's top-k, across layers.
    for layer_probs in probs:
        _, topk_indices = torch.topk(layer_probs, top_k, dim=-1)
        counts = torch.bincount(topk_indices.view(-1), minlength=num_experts)
        expert_counts += counts.float()

    # Additive smoothing keeps log() finite for never-selected experts.
    expert_probs = (expert_counts + 1e-3) / (expert_counts.sum() + num_experts * 1e-3)
    uniform = torch.ones_like(expert_probs) / num_experts
    # F.kl_div expects log-probabilities as the first argument.
    return F.kl_div(torch.log(expert_probs), uniform, reduction='batchmean')

def train():
    """Train the MoE language model with mixed precision.

    Runs 2 epochs of causal-LM training with a load-balancing auxiliary loss,
    and saves the checkpoint with the best mean epoch loss to moe/moe_best.pt.
    """
    config = MOEConfig()
    tokenizer = BertTokenizer.from_pretrained("/www/wwwroot/data/jupyter/models/modelscope/hub/tiansz/bert-base-chinese")

    dataset = TextDataset(
        "/www/wwwroot/data/jupyter/aicode/rag/milvus/mytest/xijanfengbao.json",
        tokenizer,
        config.max_seq_len
    )
    loader = DataLoader(dataset, batch_size=config.batch_size, shuffle=True)

    model = MOELanguageModel(config, tokenizer.vocab_size).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5, weight_decay=0.01)
    scheduler = CosineAnnealingLR(optimizer, T_max=len(loader)*10)
    # Disable AMP scaling when CUDA is absent so the script still runs on CPU.
    use_amp = device.type == 'cuda'
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    best_loss = float('inf')
    for epoch in range(2):
        model.train()
        epoch_loss = 0.0
        num_batches = 0

        with tqdm(loader, desc=f"Epoch {epoch+1}") as pbar:
            for batch in pbar:
                inputs = batch['input_ids'].to(device)

                optimizer.zero_grad()

                with torch.amp.autocast(device_type='cuda', dtype=torch.float16, enabled=use_amp):
                    logits, router_logits = model(inputs)

                    # Causal-LM shift: predict token t+1 from tokens <= t.
                    shift_logits = logits[:, :-1, :].contiguous()
                    shift_labels = inputs[:, 1:].contiguous()
                    # Exclude padding positions from the loss.
                    mask = (shift_labels != tokenizer.pad_token_id).float()

                    loss = F.cross_entropy(
                        shift_logits.view(-1, shift_logits.size(-1)),
                        shift_labels.view(-1),
                        reduction='none'
                    ).view(shift_labels.shape)
                    loss = (loss * mask).sum() / mask.sum()

                    aux_loss = load_balancing_loss(router_logits, config.expert_number, config.top_k)
                    # Distinct name: the old code reused the epoch accumulator
                    # variable here, destroying the running total.
                    batch_loss = loss + 0.1 * aux_loss

                scaler.scale(batch_loss).backward()
                # Gradients must be unscaled before clipping; clipping scaled
                # gradients computes the norm on the wrong magnitude.
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                scaler.step(optimizer)
                scaler.update()
                scheduler.step()

                epoch_loss += loss.item()
                num_batches += 1
                pbar.set_postfix(loss=loss.item(), aux_loss=aux_loss.item())

        # Checkpoint on the epoch mean, not whatever the last batch happened to be.
        avg_loss = epoch_loss / max(num_batches, 1)
        if avg_loss < best_loss:
            best_loss = avg_loss
            os.makedirs("moe", exist_ok=True)
            torch.save(model.state_dict(), "moe/moe_best.pt")

        torch.cuda.empty_cache()

def generate_text(prompt, model, tokenizer, max_length=100):
    """Greedily generate a continuation of `prompt`.

    Args:
        prompt: seed text to encode.
        model: module returning (logits, aux) for a (1, seq) id tensor.
        tokenizer: tokenizer providing encode/decode and sep_token_id.
        max_length: maximum number of new tokens to generate.

    Returns:
        Decoded string including the prompt, special tokens stripped.
    """
    model.eval()
    # Run on whatever device the model actually lives on, instead of relying
    # on the module-level global.
    model_device = next(model.parameters()).device
    input_ids = tokenizer.encode(prompt, return_tensors='pt').to(model_device)

    with torch.no_grad():
        for _ in range(max_length):
            logits, _ = model(input_ids)
            next_token = logits[:, -1, :].argmax(dim=-1)
            input_ids = torch.cat([input_ids, next_token.unsqueeze(0)], dim=-1)
            # Compare scalars, not a tensor against an int: stop at [SEP].
            if next_token.item() == tokenizer.sep_token_id:
                break

    return tokenizer.decode(input_ids[0], skip_special_tokens=True)

if __name__ == "__main__":
    train()

    # Sample generation with the best checkpoint.
    config = MOEConfig()
    tokenizer = BertTokenizer.from_pretrained("/www/wwwroot/data/jupyter/models/modelscope/hub/tiansz/bert-base-chinese")
    model = MOELanguageModel(config, tokenizer.vocab_size).to(device)
    # map_location keeps this working when the checkpoint was saved on a
    # different device (e.g. trained on GPU, generated on CPU).
    model.load_state_dict(torch.load("moe/moe_best.pt", map_location=device))
    print(generate_text("人工智能的未来发展", model, tokenizer))