import torch
import torch.utils.data
import copy
import models
import random

class NewServer(object):
    """Federated-learning server implementing the FedCDA aggregation strategy.

    Responsibilities:
      * hold and update the global model;
      * after a warm-up phase, greedily pick, per client, the historical model
        version whose L2 divergence from the current global model is smallest;
      * optionally aggregate with client groups (CGDS) and similarity-derived
        weights (SAW) computed on a public dataset;
      * evaluate the global model on a held-out loader.
    """

    def __init__(self, conf, eval_dataset):
        """Build the global model and the evaluation data loader.

        Args:
            conf: configuration dict. Keys read here: 'model' (architecture
                selector), 'batch_size', and optionally 'warmup' (default 50).
            eval_dataset: dataset used by ``model_eval``.
        """
        self.conf = conf
        # Architecture selection: anything other than 'resnet18' falls back
        # to the LeNet-5 variant.
        if conf.get('model') == 'resnet18':
            self.global_model = models.resnet18()
        else:
            self.global_model = models.lenet5v()
        self.eval_loader = torch.utils.data.DataLoader(
            eval_dataset,
            batch_size=self.conf["batch_size"],
            shuffle=True
        )

        # FedCDA state: round counter and length of the warm-up phase during
        # which the latest client model is always selected.
        self.current_round = 0
        self.warmup_rounds = conf.get('warmup', 50)
        self.client_groups = []
        if torch.cuda.is_available():
            self.global_model = self.global_model.cuda()

    def calculate_divergence(self, model_a, model_b):
        """Return the summed per-tensor L2 distance between two state dicts.

        Both arguments are state dicts with matching keys; tensors are moved
        to CPU first so mixed-device inputs compare safely.
        """
        total_divergence = 0.0
        for name in model_a:
            param_a = model_a[name].cpu()
            param_b = model_b[name].cpu()
            total_divergence += torch.norm(param_a - param_b).item()
        return total_divergence

    def greedy_model_selection(self, client_histories, selected_clients):
        """Greedily choose, per client, the history index closest to the global model.

        Args:
            client_histories: dict {client_id: [state_dict, ...]} of stored
                model versions per client.
            selected_clients: iterable of client ids to consider.

        Returns:
            dict {client_id: best_history_index}; clients with no stored
            history are omitted.
        """
        # The global state dict is invariant across all comparisons — build
        # it once instead of once per history entry.
        global_state = self.global_model.state_dict()

        selected_versions = {}
        for client_id in selected_clients:
            if client_id not in client_histories or not client_histories[client_id]:
                continue

            min_divergence = float('inf')
            best_idx = 0

            # Scan every stored version of this client's model.
            for idx, model in enumerate(client_histories[client_id]):
                divergence = self.calculate_divergence(model, global_state)
                if divergence < min_divergence:
                    min_divergence = divergence
                    best_idx = idx

            selected_versions[client_id] = best_idx

        return selected_versions

    def fedcda_aggregate(self, client_histories, selected_clients, active_client_count, client_groups=None, group_weights=None):
        """FedCDA aggregation; optionally aggregates within client groups.

        Args:
            client_histories: dict {client_id: [state_dict, ...]}.
            selected_clients: iterable of client ids participating this round.
            active_client_count: divisor used when averaging the accumulated
                update into the global model.
            client_groups: optional dict {client_id: [member_ids]} enabling
                the grouped (CGDS) path.
            group_weights: optional tensor indexed as
                ``group_weights[client_id][member_id]`` giving each member's
                intra-group weight; required together with ``client_groups``.

        Returns:
            dict {client_id: chosen_history_index} actually used this round.
        """
        if self.current_round < self.warmup_rounds:
            # Warm-up phase: always take each client's most recent model.
            selected_versions = {
                client_id: len(client_histories[client_id]) - 1
                for client_id in selected_clients
                if client_id in client_histories and client_histories[client_id]
            }
        else:
            # Optimization phase: greedily pick the best historical version.
            selected_versions = self.greedy_model_selection(client_histories, selected_clients)

        # Snapshot the global state once; it is read-only until the final
        # aggregation step below.
        global_state = self.global_model.state_dict()

        # Accumulator for the summed (possibly weighted) model deltas.
        weight_accumulator = {
            name: torch.zeros_like(params) for name, params in global_state.items()
        }

        for client_id in selected_clients:
            if client_id not in client_histories or not client_histories[client_id]:
                continue

            if client_groups is not None and group_weights is not None:
                # Grouped path: accumulate weighted deltas from every member
                # of this client's group.
                group = client_groups[client_id]

                for group_client_id in group:
                    if group_client_id not in client_histories or not client_histories[group_client_id]:
                        continue

                    # Intra-group weight of this member as seen by client_id.
                    client_weight = group_weights[client_id][group_client_id].item()

                    if group_client_id in selected_versions:
                        version_idx = selected_versions[group_client_id]
                        if version_idx < len(client_histories[group_client_id]):
                            model_params = client_histories[group_client_id][version_idx]

                            # NOTE(review): assumes history tensors live on the
                            # same device as the global model — confirm against
                            # how client histories are stored.
                            for name, data in global_state.items():
                                weight_accumulator[name].add_((model_params[name] - data) * client_weight)
            else:
                # Plain FedCDA path: unweighted delta from the chosen version.
                if client_id in selected_versions:
                    version_idx = selected_versions[client_id]
                    if version_idx < len(client_histories[client_id]):
                        model_params = client_histories[client_id][version_idx]

                        for name, data in global_state.items():
                            weight_accumulator[name].add_(model_params[name] - data)

        # Fold the averaged update into the global model.
        self.model_aggregrate_new(weight_accumulator, active_client_count)
        self.current_round += 1

        return selected_versions

    def simple_aggregate(self, selected_clients, clients):
        """Warm-up-phase FedAvg aggregation over each client's latest model.

        Args:
            selected_clients: either a flat list of client ids, or a grouped
                container indexable by group position; both forms are handled.
            clients: sequence of client objects exposing ``get_model_history()``.
        """
        global_state = self.global_model.state_dict()
        weight_accumulator = {
            name: torch.zeros_like(params) for name, params in global_state.items()
        }

        if not isinstance(selected_clients, list):
            # Grouped form: walk every client of every group.
            for group in range(len(selected_clients)):
                for client_id in selected_clients[group]:
                    client = clients[client_id]
                    client_model = client.get_model_history()[-1]  # latest parameters

                    for name, data in global_state.items():
                        weight_accumulator[name].add_(client_model[name] - data)
        else:
            for client_id in selected_clients:
                client = clients[client_id]
                client_model = client.get_model_history()[-1]  # latest parameters

                for name, data in global_state.items():
                    weight_accumulator[name].add_(client_model[name] - data)

        # NOTE(review): in the grouped branch this divides by the number of
        # groups rather than the number of contributing clients — confirm
        # that is the intended normalization.
        self.model_aggregrate_new(weight_accumulator, len(selected_clients))
        self.current_round += 1

    def model_aggregrate_new(self, weight_accumulator, num):
        """Apply the averaged accumulated update to the global model in place.

        Relies on ``state_dict()`` returning tensors that share storage with
        the live parameters, so the in-place ``add_`` updates the model.

        Args:
            weight_accumulator: dict {param_name: summed update tensor}.
            num: divisor (number of contributors) for the average.
        """
        for name, data in self.global_model.state_dict().items():
            update_per_layer = weight_accumulator[name] * (1 / num)
            # Integer buffers (e.g. BatchNorm's num_batches_tracked) cannot
            # take a float in-place add; cast the update first.
            if data.type() != update_per_layer.type():
                data.add_(update_per_layer.to(torch.int64))
            else:
                data.add_(update_per_layer)

    def compute_client_groups(self, clients, public_dataset):
        """Compute client groups (CGDS) and intra-group weights (SAW).

        Each client's model is run on a shared public dataset; clients are
        grouped stochastically by the cosine similarity of their average
        softmax outputs, and per-group weights come from a softmax over
        similarities.

        Returns:
            (client_groups, group_weights): dict {client_id: [member_ids]}
            and a (num_clients, num_clients) weight tensor.
        """
        print("计算客户端分组和权重...")
        print("================public_dataset:{}=====================".format(public_dataset))
        # Loader over the shared public dataset (fixed order: shuffle=False).
        public_loader = torch.utils.data.DataLoader(
            dataset=public_dataset, 
            batch_size=self.conf.get("batch_size", 64), 
            shuffle=False
        )

        # Per-client logits over the whole public dataset.
        client_logits = []
        print("================public_dataset:{}=====================".format(public_loader))
        for client in clients:
            # Deep-copy so eval()/device moves never touch the live client model.
            client_model = copy.deepcopy(client.local_model)
            if torch.cuda.is_available():
                client_model = client_model.cuda()
            client_model.eval()

            batch_logits = []
            with torch.no_grad():
                for images, _ in public_loader:
                    if torch.cuda.is_available():
                        images = images.cuda()
                    logits = client_model(images)
                    batch_logits.append(logits.detach().cpu())

            # Concatenate all batches into one (num_samples, num_classes) tensor.
            client_logits.append(torch.cat(batch_logits, dim=0))

            del client_model

        # Average class-confidence vector per client.
        client_avg_logits = [logits.mean(dim=0) for logits in client_logits]
        client_avg_logits = torch.stack(client_avg_logits)

        # Pairwise cosine similarity between softmax-normalized confidences.
        client_num = len(clients)
        similarity_matrix = torch.zeros((client_num, client_num))

        for i in range(client_num):
            for j in range(client_num):
                if i == j:
                    similarity_matrix[i, j] = 1.0
                else:
                    similarity_matrix[i, j] = torch.cosine_similarity(
                        torch.nn.functional.softmax(client_avg_logits[i], dim=0),
                        torch.nn.functional.softmax(client_avg_logits[j], dim=0),
                        dim=0
                    )

        # Stochastic grouping: client j joins client i's group with
        # probability equal to their similarity. Since similarity[i, i] == 1
        # and random.random() < 1 always holds, every client is in its own group.
        client_groups = {i: [] for i in range(client_num)}

        for i in range(client_num):
            for j in range(client_num):
                if random.random() < similarity_matrix[i, j].item():
                    client_groups[i].append(j)

        # Per-group member weights.
        group_weights = []
        for i in range(client_num):
            group = client_groups[i]
            if len(group) == 1:
                # Singleton group: the lone member gets weight 1.
                weights = torch.zeros(client_num)
                weights[group[0]] = 1.0
            else:
                # Softmax over the similarities of the group's members.
                group_sim = similarity_matrix[i, group]
                group_weights_local = torch.nn.functional.softmax(group_sim, dim=0)

                weights = torch.zeros(client_num)
                for idx, client_idx in enumerate(group):
                    weights[client_idx] = group_weights_local[idx]

            group_weights.append(weights)

        group_weights = torch.stack(group_weights, dim=0)

        return client_groups, group_weights

    def model_eval(self):
        """Evaluate the global model on the held-out loader.

        Returns:
            (accuracy_percent, mean_cross_entropy_loss).
        """
        self.global_model.eval()

        total_loss = 0.0
        correct = 0
        dataset_size = 0

        # no_grad: evaluation only — skip autograd graph construction.
        with torch.no_grad():
            for batch_id, batch in enumerate(self.eval_loader):
                data, target = batch
                dataset_size += data.size()[0]

                if torch.cuda.is_available():
                    data, target = data.cuda(), target.cuda()

                output = self.global_model(data)
                total_loss += torch.nn.functional.cross_entropy(output, target, reduction='sum').item()

                pred = output.data.max(1)[1]
                correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()

        acc = 100.0 * (float(correct) / float(dataset_size))
        total_l = total_loss / dataset_size

        return acc, total_l