# File: fedentgate/core/client.py
"""Federated Learning Client Implementation"""

import copy
import torch

class FLClient:
    """Federated learning client holding a local dataset and a model replica."""

    def __init__(self, client_id, dataset, model):
        """
        Initialize federated learning client.

        Args:
            client_id: Unique client identifier.
            dataset: Local dataset. Must expose integer class labels via
                ``dataset.targets`` (a 1-D LongTensor) and yield
                ``(data, target)`` mini-batches when iterated.
            model: Initial model. Deep-copied so local training never
                mutates the caller's instance.
        """
        self.id = client_id
        self.dataset = dataset
        self.model = copy.deepcopy(model)
        # Cached once: the label distribution is static for a fixed local dataset.
        self.entropy = self.compute_data_entropy()

    def compute_data_entropy(self):
        """Return the Shannon entropy (in nats) of the local label distribution."""
        class_counts = torch.bincount(self.dataset.targets)
        probs = class_counts / class_counts.sum()
        # Epsilon guards log(0) for classes absent from this client's data.
        return -torch.sum(probs * torch.log(probs + 1e-9))

    def local_train(self, global_model, lr=0.01, epochs=5):
        """
        Perform local training (Algorithm 2).

        Args:
            global_model: State dict of the current global model.
            lr: SGD learning rate.
            epochs: Number of local passes over the dataset.

        Returns:
            list[torch.Tensor]: Pseudo-gradients, one per parameter,
            computed as ``initial - final`` so that subtracting them
            (scaled) from the global weights recovers the locally trained
            weights. BUG FIX: the previous code returned ``p.grad`` —
            the raw autograd gradient of only the *last* mini-batch —
            contradicting the "difference from initial state" contract
            (and crashing on ``None`` grads for an empty dataset).
        """
        self.model.load_state_dict(global_model)
        # Snapshot the starting point, detached from the autograd graph,
        # so the update direction can be measured after training.
        initial_params = [p.detach().clone() for p in self.model.parameters()]

        optimizer = torch.optim.SGD(self.model.parameters(), lr=lr)
        for _ in range(epochs):
            for data, target in self.dataset:
                optimizer.zero_grad()
                output = self.model(data)
                loss = torch.nn.functional.cross_entropy(output, target)
                loss.backward()
                optimizer.step()

        # Pseudo-gradient: difference between the initial (global) state
        # and the locally trained state.
        return [
            init - p.detach()
            for init, p in zip(initial_params, self.model.parameters())
        ]
