# File: fedentgate/main.py
"""Main Training Loop for FedEntGate Framework"""

import torch
from .client import FLClient
from .server import FLServer
from .entropy_gating import EntropyGating
from .dp_noise_injection import DPNoiseGenerator

def run_fedentgate(num_rounds=100, num_clients=100, participation_rate=0.3):
    """
    Execute the FedEntGate federated training process (Algorithm 1).

    Per round: sample participating clients, refresh their entropy
    thresholds, run local training, make an entropy-based gating decision
    per client, inject DP noise into the gradients, aggregate on the
    server, and apply the averaged update to the global model in place.

    Args:
        num_rounds: Total training rounds (T).
        num_clients: Total number of clients (K).
        participation_rate: Fraction of clients sampled per round.

    Returns:
        The trained global model held by the server.
    """
    # Initialize framework components.
    global_model = create_model()  # Placeholder for model initialization
    server = FLServer(global_model, num_clients)
    gating = EntropyGating()
    dp_noise = DPNoiseGenerator()

    # Initialize clients (simplified); client i gets its own data shard.
    clients = [FLClient(i, load_client_data(i)) for i in range(num_clients)]

    # `round_idx` rather than `round`: avoids shadowing the builtin round().
    for round_idx in range(num_rounds):
        # Server: select participating clients for this round.
        participants = select_clients(num_clients, participation_rate)

        # Update each participant's entropy threshold; the threshold is a
        # function of training progress (round_idx / num_rounds).
        for client_id in participants:
            gating.update_threshold(client_id, round_idx, num_rounds)

        client_updates = {}
        for client_id in participants:
            client = clients[client_id]

            # Client: local training against the current global weights.
            gradients = client.local_train(server.global_model.state_dict())

            # Gating decision from the client's observed entropy.
            gating_state = gating.gating_decision(client.entropy, client_id)

            # Apply DP noise calibrated to the gating state.
            noisy_grads, noise_scale = dp_noise.add_noise(gradients, gating_state)

            # Track cumulative privacy cost (subsampling amplification
            # handled via the participation rate).
            dp_noise.track_privacy_cost(noise_scale, participation_rate)

            # Store update with an inverse-noise weight feature; the 1e-6
            # term guards against division by zero when noise_scale == 0.
            client_updates[client_id] = (noisy_grads, 1 / (noise_scale + 1e-6))
            server.update_client_state(client_id, client.entropy, gating_state)

        # Server: aggregate the (noise-weighted) client updates.
        avg_update = server.aggregate_updates(client_updates)

        # Apply the averaged update as an in-place descent step, outside
        # autograd tracking.
        with torch.no_grad():
            for param, update in zip(server.global_model.parameters(), avg_update):
                param -= update

        # Log cumulative privacy expenditure (ε, δ) after this round.
        epsilon, delta = dp_noise.get_privacy_spent()
        print(f"Round {round_idx}: ε={epsilon:.2f}, δ={delta}")

    return server.global_model


