#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
联邦学习服务器模块

功能：
1. 协调客户端训练
2. 模型参数聚合
3. 全局模型管理
"""

import torch
import numpy as np
from typing import Dict, List
from collections import defaultdict

class FederatedServer:
    """Federated learning server.

    Responsibilities:
    1. Coordinate client training rounds.
    2. Aggregate client parameter updates (sample-weighted FedAvg).
    3. Hold and evaluate the global model.
    """

    def __init__(self, model: torch.nn.Module, config: Dict):
        """
        Initialize the server.

        Args:
            model: global model architecture.
            config: configuration dictionary (opaque to this class; stored as-is).
        """
        self.global_model = model
        self.config = config
        # Per-client bookkeeping; populated elsewhere (not in this file).
        self.client_models = defaultdict(dict)
        self.current_round = 0

        # Reset global model parameters before the first round.
        self._init_global_model()

    def _init_global_model(self):
        """Zero all global model parameters.

        NOTE(review): zeroing discards the model's random initialization; most
        FL setups broadcast the *initialized* weights instead — confirm this
        zero starting point is intentional for the protocol in use.
        """
        # no_grad + in-place zero_ instead of mutating .data directly.
        with torch.no_grad():
            for param in self.global_model.parameters():
                param.zero_()

    def aggregate(self, client_updates: List[Dict]):
        """
        Aggregate client updates into the global model (sample-weighted FedAvg).

        Args:
            client_updates: list of dicts, each with:
                'num_samples': int, the client's local sample count;
                'params': mapping of tensor name -> parameter tensor.

        Returns:
            The updated global model.

        Raises:
            ValueError: if client_updates is empty or the total sample
                count is not positive (previously crashed with
                IndexError / ZeroDivisionError).
        """
        if not client_updates:
            raise ValueError("client_updates must not be empty")

        # Weight each client by its share of the total sample count.
        total_samples = sum(update['num_samples'] for update in client_updates)
        if total_samples <= 0:
            raise ValueError("total number of client samples must be positive")

        # Accumulators, one per parameter tensor, initialized to zero.
        aggregated_update = {}
        for key, tensor in client_updates[0]['params'].items():
            aggregated_update[key] = torch.zeros_like(tensor)

        # Weighted sum: sum_i (n_i / N) * params_i.
        for update in client_updates:
            weight = update['num_samples'] / total_samples
            for key, tensor in update['params'].items():
                aggregated_update[key] += tensor * weight

        # Copy the aggregated tensors into the global model.
        # NOTE(review): this zip relies on the clients' 'params' dict
        # insertion order matching model.parameters() order — TODO confirm
        # clients build their update dicts from the same model structure.
        with torch.no_grad():
            for param, new_param in zip(self.global_model.parameters(),
                                        aggregated_update.values()):
                param.copy_(new_param)

        self.current_round += 1
        return self.global_model

    def evaluate_global_model(self, test_loader):
        """
        Evaluate global model accuracy and mean loss.

        Args:
            test_loader: iterable yielding (data, target) batches.

        Returns:
            dict with 'accuracy' (fraction correct) and 'loss'
            (cross-entropy averaged per sample). Both are 0.0 when the
            loader yields no samples (previously a ZeroDivisionError).
        """
        self.global_model.eval()
        total_loss = 0.0
        correct = 0
        total = 0

        with torch.no_grad():
            for data, target in test_loader:
                output = self.global_model(data)
                # Sum (not mean) so per-sample averaging stays correct
                # across batches of unequal size.
                total_loss += torch.nn.functional.cross_entropy(
                    output, target, reduction='sum'
                ).item()
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()
                total += target.size(0)

        if total == 0:
            return {'accuracy': 0.0, 'loss': 0.0}

        return {
            'accuracy': correct / total,
            'loss': total_loss / total,
        }