import torch
import torch.nn as nn

class LocalTraining:
    """Client-side dual-model local training for personalized federated learning.

    Trains a global model and a personalized model on local data, fuses their
    gradients, and offers DP-style gradient protection (per-tensor L2 clipping
    followed by additive Gaussian noise).
    """

    def __init__(self, clip_threshold, noise_scale, lambda_reg=0.5, fusion_coeff=0.7):
        """
        Args:
            clip_threshold: L2-norm bound applied per gradient tensor when clipping.
            noise_scale: std-dev multiplier of the Gaussian noise added after clipping.
            lambda_reg: weight of the proximal term pulling the personal model's
                parameters toward the (detached) global model's parameters.
            fusion_coeff: interpolation factor for gradient fusion
                (0.0 -> pure global gradients, 1.0 -> pure personal gradients).
        """
        self.clip_threshold = clip_threshold
        self.noise_scale = noise_scale
        self.lambda_reg = lambda_reg
        self.fusion_coeff = fusion_coeff

    def dual_model_training(self, global_model, personal_model, local_data, device,
                            num_epochs=1, batch_size=64, lr=0.01):
        """Train both models on local data and return fused gradients.

        Args:
            global_model: the shared model; trained with plain cross-entropy.
            personal_model: the client-specific model; trained with cross-entropy
                plus a proximal L2 penalty toward ``global_model``'s parameters.
            local_data: a Dataset of (input, class-index target) pairs.
            device: torch device the batches are moved to.
            num_epochs: local epochs per model.
            batch_size: DataLoader batch size.
            lr: SGD learning rate for both optimizers.

        Returns:
            dict mapping parameter name -> fused gradient tensor:
            ``g + fusion_coeff * (p - g)`` for each named parameter's gradient.
        """
        train_loader = torch.utils.data.DataLoader(local_data, batch_size=batch_size, shuffle=True)
        # Hoisted out of the batch loops: one loss instance for all batches.
        criterion = nn.CrossEntropyLoss()

        # --- Phase 1: global model, plain supervised training ---
        global_model.train()
        global_optimizer = torch.optim.SGD(global_model.parameters(), lr=lr)
        for _ in range(num_epochs):
            for data, target in train_loader:
                data, target = data.to(device), target.to(device)
                global_optimizer.zero_grad()
                loss = criterion(global_model(data), target)
                loss.backward()
                global_optimizer.step()

        # --- Phase 2: personal model, supervised loss + proximal regularizer ---
        personal_model.train()
        personal_optimizer = torch.optim.SGD(personal_model.parameters(), lr=lr)
        for _ in range(num_epochs):
            for data, target in train_loader:
                data, target = data.to(device), target.to(device)
                personal_optimizer.zero_grad()
                output = personal_model(data)
                # Proximal term: L2 distance between personal and global params.
                # detach() stops gradients from flowing into the global model here.
                reg_loss = 0.0
                for p_param, g_param in zip(personal_model.parameters(), global_model.parameters()):
                    reg_loss += torch.norm(p_param - g_param.detach(), p=2)
                loss = criterion(output, target) + self.lambda_reg * reg_loss
                loss.backward()
                personal_optimizer.step()

        # --- Phase 3: gradient fusion ---
        # NOTE(review): .grad holds only the LAST batch's gradients for each
        # model after its loop, so fusion acts on last-batch gradients —
        # presumably intentional; confirm this is the desired fusion signal.
        global_grads = self._get_gradients(global_model)
        personal_grads = self._get_gradients(personal_model)
        fused_grads = {}
        for name, g_grad in global_grads.items():
            fused_grads[name] = g_grad + self.fusion_coeff * (personal_grads[name] - g_grad)

        return fused_grads

    def gradient_protection(self, gradients):
        """Apply L2 clipping and Gaussian noise to a dict of gradient tensors.

        Args:
            gradients: dict mapping name -> gradient tensor.

        Returns:
            (noisy_grads, clipped_grads): clipped_grads holds each tensor scaled
            so its L2 norm is at most ``clip_threshold``; noisy_grads adds
            N(0, noise_scale^2) noise on top of the clipped values.
        """
        # Per-tensor norm clipping: scale down only when the norm exceeds
        # the threshold (scale is capped at 1.0, never amplifies).
        clipped_grads = {}
        for name, grad in gradients.items():
            # .item() gives a plain float so `scale` has a consistent type
            # (the original min(float, 0-dim tensor) mixed float/tensor results).
            grad_norm = torch.norm(grad).item()
            scale = min(1.0, self.clip_threshold / (grad_norm + 1e-8))
            clipped_grads[name] = grad * scale

        # Additive Gaussian noise on the clipped gradients.
        noisy_grads = {}
        for name, grad in clipped_grads.items():
            noise = torch.randn_like(grad) * self.noise_scale
            noisy_grads[name] = grad + noise

        return noisy_grads, clipped_grads

    def _get_gradients(self, model):
        """Return a dict of cloned gradients for parameters with a non-None .grad."""
        grads = {}
        for name, param in model.named_parameters():
            if param.grad is not None:
                grads[name] = param.grad.clone()
        return grads
