import copy
import logging
import random
import time
from pathlib import Path
from typing import Any, Dict, List, Optional

import torch
import torch.nn as nn
import torch.optim as optim

from ..common.protocol import FederatedLearningProtocol
from ..common.utils import save_model, load_model

class FederatedServer:
    """Federated learning server.

    Holds the global model, registers clients, and aggregates client
    updates into the global weights once per round.
    """

    def __init__(self, global_model: nn.Module, config: Dict[str, Any]):
        """
        Initialize the federated learning server.

        Args:
            global_model: the shared global model.
            config: configuration dict; recognized keys include
                'learning_rate', 'momentum', 'aggregation',
                'client_fraction', 'output_dir', 'save_interval'.
        """
        self.global_model = global_model
        self.config = config
        self.clients: List[str] = []
        self.logger = logging.getLogger("FedServer")

        # Optimizer over the global parameters. Plain FedAvg below replaces
        # the weights directly; the optimizer is kept for server-side update
        # strategies that may use it.
        self.optimizer = optim.SGD(
            self.global_model.parameters(),
            lr=config.get('learning_rate', 0.01),
            momentum=config.get('momentum', 0.9)
        )

    def add_client(self, client_id: str) -> None:
        """Register a client id with the server."""
        self.clients.append(client_id)
        self.logger.info(f"添加客户端: {client_id}")

    def aggregate(self, client_weights: List[Dict[str, torch.Tensor]],
                  sample_sizes: Optional[List[int]] = None) -> None:
        """
        Aggregate client model weights into the global model.

        Args:
            client_weights: state dicts uploaded by the clients.
            sample_sizes: optional per-client sample counts; when given,
                FedAvg weights each client proportionally to its data size,
                otherwise clients are weighted equally.

        Raises:
            ValueError: if client_weights is empty, the configured
                aggregation algorithm is unknown, or the aggregated
                weights contain NaN/Inf values.
            NotImplementedError: if 'fedprox' aggregation is configured.
        """
        if not client_weights:
            raise ValueError("client_weights must not be empty")

        self.logger.info("开始模型聚合...")
        start_time = time.time()

        # Pick the aggregation algorithm from configuration.
        algorithm = self.config.get('aggregation', 'fedavg')

        if algorithm == 'fedavg':
            averaged_weights = self._weighted_average(client_weights, sample_sizes)
        elif algorithm == 'fedprox':
            averaged_weights = self._fedprox_aggregate(client_weights)
        else:
            raise ValueError(f"未知聚合算法: {algorithm}")

        # Reject NaN/Inf before touching the global model.
        self._validate_weights(averaged_weights)

        self.global_model.load_state_dict(averaged_weights)

        elapsed = time.time() - start_time
        self.logger.info(f"模型聚合完成({algorithm})，耗时: {elapsed:.2f}s")

    @staticmethod
    def _weighted_average(client_weights: List[Dict[str, torch.Tensor]],
                          sample_sizes: Optional[List[int]]) -> Dict[str, torch.Tensor]:
        """FedAvg: sample-size-weighted mean of the client state dicts.

        With no sample_sizes every client gets equal weight, which is
        identical to a plain per-key mean.
        """
        if sample_sizes is None:
            sample_sizes = [1] * len(client_weights)
        total = float(sum(sample_sizes))
        averaged: Dict[str, torch.Tensor] = {}
        for key in client_weights[0]:
            averaged[key] = torch.stack([
                weights[key] * (size / total)
                for weights, size in zip(client_weights, sample_sizes)
            ]).sum(dim=0)
        return averaged

    def _fedprox_aggregate(self, client_weights: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        """FedProx aggregation (not implemented yet).

        Raises NotImplementedError explicitly instead of silently returning
        None, which would make aggregate() fail later inside
        load_state_dict with a confusing error.
        """
        raise NotImplementedError("FedProx aggregation is not implemented")

    def _validate_weights(self, weights: Dict[str, torch.Tensor]) -> None:
        """Raise ValueError if any weight tensor contains NaN or Inf."""
        for key, value in weights.items():
            if torch.isnan(value).any() or torch.isinf(value).any():
                raise ValueError(f"无效模型权重检测到NaN/Inf值: {key}")

    def save_checkpoint(self, path: str) -> None:
        """Persist the global model plus config/client metadata."""
        save_model(
            self.global_model,
            path,
            "global_model",
            metadata={
                'config': self.config,
                'clients': self.clients
            }
        )

    def load_checkpoint(self, path: str) -> None:
        """Restore the global model and server state from a checkpoint."""
        checkpoint = load_model(self.global_model, path, "global_model")
        self.config = checkpoint['metadata'].get('config', {})
        self.clients = checkpoint['metadata'].get('clients', [])

    def run(self, rounds: int = 10) -> None:
        """Run the federated learning loop for the given number of rounds."""
        self.logger.info(f"开始联邦学习，总轮次: {rounds}")

        # 'round_num' (not 'round') so the builtin is not shadowed.
        for round_num in range(1, rounds + 1):
            self.logger.info(f"\n=== 第 {round_num}/{rounds} 轮 ===")

            # 1. Select participating clients.
            selected = self.select_clients()

            # 2. Send the global model to the selected clients.
            self.dispatch_model(selected)

            # 3. Receive the clients' updated weights.
            client_weights = self.collect_updates(selected)

            # 4. Aggregate the updates into the global model.
            self.aggregate(client_weights)

            # 5. Periodically save a checkpoint.
            if round_num % self.config.get('save_interval', 5) == 0:
                self.save_checkpoint(
                    Path(self.config['output_dir']) / f"round_{round_num}"
                )

    def select_clients(self) -> List[str]:
        """Randomly select a fraction of registered clients for this round.

        Uses the stdlib random.sample (the previous np.random.choice call
        referenced an unimported 'np' and returned an ndarray instead of
        the annotated List[str]).
        """
        fraction = self.config.get('client_fraction', 0.5)
        num_select = max(1, int(len(self.clients) * fraction))
        selected = random.sample(self.clients, num_select)
        self.logger.info(f"选择客户端: {selected}")
        return selected

    def dispatch_model(self, client_ids: List[str]) -> None:
        """Distribute the global model to the given clients."""
        self.logger.info(f"分发模型给 {len(client_ids)} 个客户端")
        # In a real deployment the model would be sent over the network here.

    def collect_updates(self, client_ids: List[str]) -> List[Dict[str, torch.Tensor]]:
        """Collect (here: simulate) model updates from the given clients."""
        self.logger.info(f"等待 {len(client_ids)} 个客户端上传更新")
        # Simulated client updates; a real deployment would receive these
        # over the network.
        updates = []
        for cid in client_ids:
            # Deep-copy instead of type(self.global_model)(): the latter
            # breaks for models whose constructor needs arguments
            # (e.g. nn.Sequential) and then fails in load_state_dict.
            client_model = copy.deepcopy(self.global_model)

            # Simulate the weight drift produced by local training.
            with torch.no_grad():
                for param in client_model.parameters():
                    param.add_(torch.randn_like(param) * 0.01)

            updates.append(client_model.state_dict())

        return updates

def main():
    """Entry point for the federated learning server demo."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )

    # Demo configuration.
    demo_config = {
        'learning_rate': 0.01,
        'momentum': 0.9,
        'client_fraction': 0.5,
        'output_dir': 'checkpoints',
        'save_interval': 5,
    }

    # Demo model: a tiny two-layer MLP.
    demo_model = nn.Sequential(
        nn.Linear(10, 32),
        nn.ReLU(),
        nn.Linear(32, 2),
    )

    # Stand up the server and register a handful of example clients.
    server = FederatedServer(demo_model, demo_config)
    for idx in range(1, 6):
        server.add_client(f"client_{idx}")

    # Kick off federated training.
    server.run(rounds=10)

# Start the server only when executed as a script, not on import.
if __name__ == "__main__":
    main()