from collections import OrderedDict
from typing import Dict, List, Tuple

import flwr as fl
import numpy as np
import torch

from config import DEVICE
from models.image_classifier import ImageClassifier, ImageClassifierTrainer
from models.time_series import TimeSeriesLSTM, TimeSeriesTrainer

class FederatedClient(fl.client.NumPyClient):
    """Flower NumPyClient that delegates local training/evaluation to a trainer.

    The client exchanges model weights with the Flower server as lists of
    NumPy arrays and runs local epochs over ``train_loader`` during ``fit``
    and a full pass over ``val_loader`` during ``evaluate``.
    """

    def __init__(self, model, trainer, train_loader, val_loader):
        """Store the model, its trainer, and the local data loaders.

        Args:
            model: torch.nn.Module whose state_dict is synchronized with the server.
            trainer: object exposing ``train_step(x, y)`` and ``evaluate(x, y)``,
                each returning a scalar loss for one batch.
            train_loader: DataLoader yielding ``(x, y)`` batches for local training.
            val_loader: DataLoader yielding ``(x, y)`` batches for local evaluation.
        """
        self.model = model
        self.trainer = trainer
        self.train_loader = train_loader
        self.val_loader = val_loader

    def get_parameters(self, config) -> List[np.ndarray]:
        """Return the current model weights as a list of NumPy arrays."""
        return [val.cpu().numpy() for _, val in self.model.state_dict().items()]

    def set_parameters(self, parameters: List[np.ndarray]) -> None:
        """Load server-provided weights into the model.

        Relies on ``parameters`` being in the same order as the model's
        state_dict keys (Flower's standard convention).
        """
        params_dict = zip(self.model.state_dict().keys(), parameters)
        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
        self.model.load_state_dict(state_dict, strict=True)

    def fit(self, parameters, config):
        """Train locally for ``config["epochs"]`` epochs and return updated weights.

        Returns:
            Tuple of (updated parameters, number of training samples, empty metrics dict).
        """
        self.set_parameters(parameters)

        # Train the model for the server-requested number of local epochs.
        for _ in range(config["epochs"]):
            for x, y in self.train_loader:
                self.trainer.train_step(x, y)

        return self.get_parameters(config={}), len(self.train_loader.dataset), {}

    def evaluate(self, parameters, config):
        """Evaluate the received weights on the local validation set.

        Returns:
            Tuple of (sample-weighted average loss, number of evaluated
            samples, metrics dict containing the same loss).
        """
        self.set_parameters(parameters)

        loss = 0.0
        total_samples = 0

        for x, y in self.val_loader:
            batch_loss = self.trainer.evaluate(x, y)
            batch_size = x.shape[0]
            # Weight each batch's loss by its size so the mean is per-sample.
            loss += batch_loss * batch_size
            total_samples += batch_size

        # Guard against an empty validation loader to avoid ZeroDivisionError.
        avg_loss = float(loss / total_samples) if total_samples > 0 else 0.0
        return avg_loss, total_samples, {"loss": avg_loss}