
import numpy as np
from device_heterogeneity import DeviceHeterogeneityMetrics
from entropy_metrics import EntropyMetrics
from temperature_attention import TemperatureAttentionController
from dynamic_aggregation import DHAAggregator
from resource_adaptation import ResourceAdapter

class DHAFLSystem:
    """DHA-FL system main framework.

    Coordinates heterogeneity-aware federated learning: clients perform
    temperature-controlled local updates, and the server dynamically
    assigns clients to layers ('high'/'medium'/'low') by device-state
    entropy and aggregates their gradients hierarchically.
    """

    def __init__(self, num_clients, model_dim):
        """
        Args:
            num_clients: number of participating client devices.
            model_dim: dimensionality of the (flattened) model vector.
        """
        self.num_clients = num_clients
        self.model_dim = model_dim
        self.hetero_metrics = DeviceHeterogeneityMetrics()
        self.entropy_metrics = EntropyMetrics(model_dim)
        self.temp_controller = TemperatureAttentionController()
        self.aggregator = DHAAggregator()
        self.resource_adapter = ResourceAdapter()

        # Initialize global model (random placeholder initialization).
        self.global_model = np.random.randn(model_dim)
        # Per-client state. 'gradients' is filled in by client_update;
        # 'data' (a local dataset) is NOT created here and may be
        # attached externally before resource adaptation can run.
        self.client_states = [{
            'model': np.random.randn(model_dim),
            'entropy_history': [],
            'hetero_score': np.random.rand(),
            'layer': 'medium'
        } for _ in range(num_clients)]

    def client_update(self, client_idx, round_num):
        """Run a (simulated) local update for one client.

        Args:
            client_idx: index into self.client_states.
            round_num: current training round (unused in this
                simplified implementation, kept for interface parity).

        Returns:
            The client's gradient vector (shape: (model_dim,)).
        """
        client = self.client_states[client_idx]

        # Current temperature from this client's entropy change rate.
        entropy_change = self.entropy_metrics.entropy_change_rate(client['entropy_history'])
        temperature = self.temp_controller.update_temperature(entropy_change)

        # Resource adaptation for low-tier devices.
        # BUG FIX: client dicts are created without a 'data' key, so the
        # original unconditional client['data'] access raised KeyError as
        # soon as a client was assigned to the 'low' layer. Only deploy
        # when local data has actually been attached.
        if client['layer'] == 'low' and 'data' in client:
            client['model'] = self.resource_adapter.lightweight_deployment(
                self.global_model, client['data'], temperature)

        # Local training (simplified: random gradients stand in for SGD).
        gradients = np.random.randn(self.model_dim)
        client['gradients'] = gradients

        # Track gradient entropy for the temperature controller.
        entropy = self.entropy_metrics.gradient_entropy(gradients)
        client['entropy_history'].append(entropy)

        return gradients

    def server_aggregation(self, round_num):
        """Hierarchically aggregate client gradients and update the global model.

        Assumes every client has run client_update at least once, so each
        entropy_history is non-empty and each client has 'gradients'.

        Returns:
            The updated global model vector.
        """
        # Per-device state entropy.
        state_entropies = []
        for client in self.client_states:
            state_entropy = self.aggregator.device_state_entropy(
                client['hetero_score'], client['entropy_history'][-1])
            state_entropies.append(state_entropy)

        # Current temperature from the mean entropy change rate.
        avg_entropy_change = np.mean([
            self.entropy_metrics.entropy_change_rate(c['entropy_history'])
            for c in self.client_states])
        temperature = self.temp_controller.update_temperature(avg_entropy_change)

        # Dynamic hierarchization: assign each client to a layer.
        layer_assignments = {'high': [], 'medium': [], 'low': []}
        for i, client in enumerate(self.client_states):
            layer = self.aggregator.dynamic_hierarchization(state_entropies[i], temperature)
            client['layer'] = layer
            layer_assignments[layer].append(i)

        # Teacher gradient for lightweight correction, hoisted out of the
        # layer loop (high-layer gradients are never modified below, so
        # the mean is loop-invariant).
        # BUG FIX: guard against an empty high layer — np.mean over an
        # empty list would produce NaN and poison every corrected gradient.
        high_indices = layer_assignments['high']
        teacher_grad = (np.mean([self.client_states[i]['gradients']
                                 for i in high_indices], axis=0)
                        if high_indices else None)

        # Hierarchical aggregation.
        aggregated_grad = np.zeros(self.model_dim)
        layer_weights = self.temp_controller.get_layer_weights(temperature)

        for layer_idx, layer_type in enumerate(['high', 'medium', 'low']):
            client_indices = layer_assignments[layer_type]
            if not client_indices:
                continue

            # Lightweight gradient correction for non-high layers.
            # BUG FIX: the original gathered layer_grads BEFORE the
            # correction rebound client['gradients'], so the corrected
            # gradients never entered the aggregate. Correct first,
            # then gather.
            if layer_type != 'high' and teacher_grad is not None:
                for i in client_indices:
                    self.client_states[i]['gradients'] = self.aggregator.lightweight_correction(
                        teacher_grad, self.client_states[i]['gradients'], temperature)

            # In-layer aggregation inputs.
            layer_entropies = [self.client_states[i]['entropy_history'][-1] for i in client_indices]
            layer_grads = [self.client_states[i]['gradients'] for i in client_indices]

            # Attention-based weight assignment within the layer.
            weights = self.aggregator.attention_aggregation(
                layer_type, layer_entropies, temperature)

            layer_agg_grad = np.sum([w * g for w, g in zip(weights, layer_grads)], axis=0)
            aggregated_grad += layer_weights[layer_idx] * layer_agg_grad

        # Global model update (fixed learning rate 0.01).
        self.global_model -= 0.01 * aggregated_grad

        return self.global_model

# System run example
if __name__ == "__main__":
    system = DHAFLSystem(num_clients=10, model_dim=100)

    # Simulated training process.
    for round_num in range(100):
        # Client updates (sequential here; conceptually parallel).
        # BUG FIX: iterate over system.num_clients instead of a
        # hard-coded 10, so the loop stays correct if num_clients
        # in the constructor call above is changed.
        for client_idx in range(system.num_clients):
            system.client_update(client_idx, round_num)

        # Server-side aggregation and global model update.
        global_model = system.server_aggregation(round_num)
        print(f"Round {round_num}: Global model updated")
