import logging
import time
from pathlib import Path
from typing import Dict, Any

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

from ..common.protocol import FederatedLearningProtocol
from ..common.utils import save_model, load_model

class FederatedClient:
    """Federated learning client.

    Holds a local copy of the global model plus local training data,
    runs local training epochs (SGD or Adam) and returns the — optionally
    noise-perturbed — updated model weights.
    """

    def __init__(self, client_id: str, config: Dict[str, Any]):
        """
        Initialize the federated learning client.

        Args:
            client_id: Unique identifier of this client.
            config: Hyper-parameter dictionary. Recognised (all optional) keys:
                batch_size, optimizer, learning_rate, momentum, local_epochs,
                early_stop, clip_grad, clip_value, differential_privacy,
                noise_scale, output_dir.
        """
        self.client_id = client_id
        self.config = config
        self.logger = logging.getLogger(f"FedClient-{client_id}")
        self.local_model = None  # nn.Module, installed via set_model()
        self.local_data = None   # dict with numpy arrays under 'x' / 'y', via set_data()

    def set_model(self, model: nn.Module) -> None:
        """Install the global model received from the server."""
        self.local_model = model
        self.logger.info("接收全局模型")

    def set_data(self, data: Dict[str, Any]) -> None:
        """Install the local dataset (expects numpy arrays under 'x' and 'y')."""
        self.local_data = data
        self.logger.info(f"加载本地数据，样本数: {len(data['x'])}")

    def _make_dataloader(self) -> torch.utils.data.DataLoader:
        """Wrap the local numpy arrays into a shuffled DataLoader."""
        x = torch.from_numpy(self.local_data['x']).float()
        y = torch.from_numpy(self.local_data['y']).long()
        dataset = torch.utils.data.TensorDataset(x, y)
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=self.config.get('batch_size', 32),
            shuffle=True
        )

    def _make_optimizer(self) -> optim.Optimizer:
        """Build the optimizer selected by config['optimizer'] ('sgd' or 'adam').

        Raises:
            ValueError: if the configured optimizer name is unknown.
        """
        optimizer_name = self.config.get('optimizer', 'sgd').lower()
        if optimizer_name == 'sgd':
            return optim.SGD(
                self.local_model.parameters(),
                lr=self.config.get('learning_rate', 0.01),
                momentum=self.config.get('momentum', 0.9)
            )
        if optimizer_name == 'adam':
            return optim.Adam(
                self.local_model.parameters(),
                lr=self.config.get('learning_rate', 0.001)
            )
        raise ValueError(f"未知优化器: {optimizer_name}")

    @staticmethod
    def _add_differential_privacy(state: Dict[str, torch.Tensor],
                                  noise_scale: float) -> Dict[str, torch.Tensor]:
        """Return a copy of *state* with Gaussian noise added to float tensors.

        BUGFIX: the original code called a module-level
        `add_differential_privacy` that was never defined or imported,
        raising NameError whenever config['differential_privacy'] was on.
        Integer buffers (e.g. BatchNorm counters) are passed through untouched.
        """
        noised = {}
        for name, tensor in state.items():
            if tensor.is_floating_point():
                noised[name] = tensor + torch.randn_like(tensor) * noise_scale
            else:
                noised[name] = tensor
        return noised

    def train(self) -> Dict[str, torch.Tensor]:
        """Run local training and return the updated model state dict.

        Returns:
            The model's state_dict, optionally perturbed with Gaussian
            noise when differential privacy is enabled.

        Raises:
            ValueError: if the model or the data has not been set, or the
                configured optimizer name is unknown.
        """
        if self.local_model is None:
            raise ValueError("未设置本地模型")
        if self.local_data is None:
            raise ValueError("未设置本地数据")

        self.logger.info("开始本地训练...")
        start_time = time.time()

        loader = self._make_dataloader()
        optimizer = self._make_optimizer()

        criterion = nn.CrossEntropyLoss()
        best_loss = float('inf')
        early_stop = self.config.get('early_stop', 3)
        no_improve = 0

        # Local training loop with early stopping on a loss plateau.
        self.local_model.train()
        for epoch in range(self.config.get('local_epochs', 3)):
            epoch_loss = 0.0
            for batch_x, batch_y in loader:
                optimizer.zero_grad()
                outputs = self.local_model(batch_x)
                loss = criterion(outputs, batch_y)
                loss.backward()

                # Optional gradient clipping for training stability.
                if self.config.get('clip_grad', False):
                    torch.nn.utils.clip_grad_norm_(
                        self.local_model.parameters(),
                        self.config.get('clip_value', 1.0)
                    )

                optimizer.step()
                epoch_loss += loss.item()

            # max(1, ...) guards against ZeroDivisionError on an empty
            # dataset (the original divided by len(loader) unconditionally).
            avg_loss = epoch_loss / max(1, len(loader))
            if avg_loss < best_loss:
                best_loss = avg_loss
                no_improve = 0
            else:
                no_improve += 1
                if no_improve >= early_stop:
                    # BUGFIX: report the number of epochs without improvement
                    # (no_improve), not the running epoch counter (epoch+1).
                    self.logger.info(f"早停触发: 验证损失 {no_improve} 轮未提升")
                    break

        # Optionally perturb the weights before sending them back
        # (simple Gaussian-noise differential-privacy mechanism).
        model_state = self.local_model.state_dict()
        if self.config.get('differential_privacy', False):
            model_state = self._add_differential_privacy(
                model_state,
                self.config.get('noise_scale', 0.01)
            )

        elapsed = time.time() - start_time
        self.logger.info(f"本地训练完成，耗时: {elapsed:.2f}s")

        return model_state

    def save_checkpoint(self, path: str) -> None:
        """Persist the local model plus config/data metadata via save_model().

        Args:
            path: Destination directory/file (str or Path accepted).

        Raises:
            ValueError: if no model has been set.
        """
        if self.local_model is None:
            raise ValueError("未设置本地模型")

        save_model(
            self.local_model,
            path,
            f"client_{self.client_id}_model",
            metadata={
                'config': self.config,
                'data_stats': {
                    'samples': len(self.local_data['x']),
                    'features': self.local_data['x'].shape[1]
                } if self.local_data else None
            }
        )

    def load_checkpoint(self, path: str) -> None:
        """Restore model weights via load_model() and re-adopt the saved config.

        Raises:
            ValueError: if no model has been set to load weights into
                (consistency fix: the original passed None to load_model).
        """
        if self.local_model is None:
            raise ValueError("未设置本地模型")
        checkpoint = load_model(self.local_model, path, f"client_{self.client_id}_model")
        self.config = checkpoint['metadata'].get('config', {})

    def run(self):
        """Demo main loop: build a toy model/dataset, train, checkpoint.

        Returns:
            The updated model state dict produced by train().
        """
        self.logger.info(f"客户端 {self.client_id} 启动")

        # Simulated global model received from the server.
        global_model = nn.Sequential(
            nn.Linear(10, 32),
            nn.ReLU(),
            nn.Linear(32, 2)
        )
        self.set_model(global_model)

        # Simulated local data (random features, binary labels).
        x = np.random.randn(100, 10)
        y = np.random.randint(0, 2, size=100)
        self.set_data({'x': x, 'y': y})

        # Local training round.
        updated_weights = self.train()

        # Persist the trained model.
        self.save_checkpoint(Path(self.config.get('output_dir', 'checkpoints')) / self.client_id)

        return updated_weights

def main():
    """Entry point: configure logging and launch one demo client round."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Example hyper-parameter configuration for a single client.
    demo_config = {
        'learning_rate': 0.01,
        'momentum': 0.9,
        'batch_size': 32,
        'local_epochs': 3,
        'output_dir': 'checkpoints',
    }

    # Create the client and run its local training loop.
    FederatedClient("client_1", demo_config).run()

if __name__ == "__main__":
    main()