from copy import deepcopy
import torch.utils.data
import torch.nn.functional as F
from _dynamic_dataset.EvalType import EvalType
from server_client import copy_model_params


class Server(object):
    """Federated-learning server.

    Holds three copies of the model:
      * ``global_model`` — the model updated by aggregating client updates,
      * ``old_model``    — a snapshot of the global model taken right before
                           the last aggregation into ``global_model``,
      * ``sub_model``    — a scratch model used to measure the contribution of
                           subsets of clients (reset from ``old_model`` before
                           each sub-aggregation).
    """

    def __init__(self, conf, eval_loader, device, model):
        """Initialize the server.

        :param conf: configuration object/dict (stored as-is, not inspected here).
        :param eval_loader: DataLoader over the evaluation set; its ``dataset``
            is expected to expose ``updateIndices``/``indices``/``addEvalRecord``
            /``addConfidenceRecord`` (see ``contribution_eval``).
        :param device: torch device the models are moved to.
        :param model: template model; three independent deep copies are kept.
        """
        self.conf = conf
        # Global model updated by client aggregation.
        self.global_model = deepcopy(model)
        # Snapshot of the previous global model parameters.
        self.old_model = deepcopy(model)
        # Scratch model rebuilt from client-update subsets to measure contribution.
        self.sub_model = deepcopy(model)
        # Evaluation data loader.
        self.eval_loader = eval_loader
        # Optional loader over a subset of the evaluation data.
        self.eval_sub_loader = None

        self.device = device
        self.global_model.to(device)
        self.sub_model.to(device)
        # NOTE(review): old_model is intentionally left on its original device;
        # it is only used as a parameter store via copy_model_params — confirm.

    def model_aggregate(self, clients, target_model):
        """Aggregate client updates into ``target_model``, weighted by client weight.

        Each ``client`` must expose ``weight`` (a number) and ``grad_update``
        (a dict mapping parameter names to update tensors). The weighted
        average of the updates is added in place to ``target_model``.

        :param clients: iterable of participating clients.
        :param target_model: either ``self.global_model`` (the previous global
            parameters are snapshotted into ``old_model`` first) or
            ``self.sub_model`` (which is first reset from ``old_model``).
        """
        # Identity comparison, not equality: nn.Module does not define __eq__,
        # and "the very same object" is what is meant here.
        if target_model is self.global_model:
            # Snapshot the current global parameters before applying updates.
            copy_model_params(self.old_model, self.global_model)
        elif target_model is self.sub_model:
            # Reset the scratch model to the pre-aggregation global parameters.
            copy_model_params(self.sub_model, self.old_model)
        # Total weight across participating clients (normalization factor).
        sum_weight = sum(client.weight for client in clients)
        # Accumulate the weighted average of the per-client updates.
        aggregated_updates = {}
        for client in clients:
            # Hoist the loop-invariant normalization out of the inner loop.
            scale = client.weight / sum_weight
            for name, update in client.grad_update.items():
                if name in aggregated_updates:
                    aggregated_updates[name] += update * scale
                else:
                    # ``update * scale`` allocates a fresh tensor, so the later
                    # in-place ``+=`` never mutates a client's own update.
                    aggregated_updates[name] = update * scale
        # Apply the aggregated update to the target model's parameters.
        # state_dict() tensors share storage with the model, so copy_ writes
        # straight into the model's parameters/buffers.
        for name, param in target_model.state_dict().items():
            if name in aggregated_updates:
                param.copy_(param + aggregated_updates[name])

    def model_eval(self, target_model, eval_loader):
        """Evaluate ``target_model`` on ``eval_loader``.

        :param target_model: model to evaluate (put into eval mode here).
        :param eval_loader: iterable of ``(data, target)`` batches.
        :return: ``(acc, total_l)`` — top-1 accuracy in percent and the mean
            cross-entropy loss per example.
        """
        target_model.eval()
        total_loss = 0.0
        correct = 0
        dataset_size = 0
        with torch.no_grad():
            for batch_id, batch in enumerate(eval_loader):
                data, target = batch
                dataset_size += data.size()[0]
                # Move the batch to the model's device.
                data = data.to(self.device)
                target = target.to(self.device)
                output = target_model(data)
                # Sum (not mean) per batch so the final division by
                # dataset_size yields a correct per-example mean even when the
                # last batch is smaller than the others.
                total_loss += F.cross_entropy(output, target, reduction='sum').item()
                # Index of the max logit = predicted class.
                pred = output.data.max(1)[1]
                correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
        # Accuracy in percent over the whole evaluation set.
        acc = 100.0 * (float(correct) / float(dataset_size))
        # Mean loss per example.
        total_l = total_loss / dataset_size
        return acc, total_l

    def contribution_eval(self, target_model, eval_loader, device, eval_type, sample_ratio, sub_clients_nums, test_example_num, beta):
        """Evaluate ``target_model`` on a sampled subset of the test set.

        The subset is re-sampled via ``self.eval_loader.dataset.updateIndices``.
        Examples *not* in the sample are assumed 100% correct; the sampled
        examples use the measured accuracy, and the two are blended
        proportionally into a fitted precision.

        :param eval_type: an ``EvalType`` value controlling the sampling and
            whether confidence records are kept.
        :param sample_ratio: fraction of test examples to sample.
        :param sub_clients_nums: per-example correctness/confidence records are
            only kept when at least 3 sub-clients are involved.
        :param test_example_num: size of the full test set.
        :param beta: smoothing factor forwarded to ``addConfidenceRecord``.
        :return: ``(fit_precision, ir)`` — blended accuracy in percent and the
            percentage of test examples skipped this round.

        NOTE(review): this method samples indices on ``self.eval_loader`` but
        iterates the ``eval_loader`` argument — presumably callers pass the
        same loader (or a sub-loader kept in sync); confirm against callers.
        """
        # Per-batch prediction-correctness and confidence records.
        batch_results = []
        batch_confidences = []
        correct = 0
        total = 0
        target_model.eval()
        # Re-sample which test examples to use for this round.
        self.eval_loader.dataset.updateIndices(eval_type, sample_ratio)
        indices = self.eval_loader.dataset.indices
        with torch.no_grad():
            for data in eval_loader:
                images, labels = data
                images, labels = images.to(device), labels.to(device)
                outputs = target_model(images)
                # Softmax confidence and predicted class per example.
                confidences, predicted = torch.max(F.softmax(outputs, dim=1), 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
                if sub_clients_nums >= 3:
                    # Record per-example correctness for contribution tracking.
                    batch_results.append((predicted == labels).cpu().int())
                    if eval_type == EvalType.WEIGHT_CHOOSE_IGNORE_SET:
                        batch_confidences.append(confidences)
            # Flush the collected per-example records to the dataset.
            if sub_clients_nums >= 3:
                results = torch.cat(batch_results, dim=0)
                self.eval_loader.dataset.addEvalRecord(results)
                if eval_type == EvalType.WEIGHT_CHOOSE_IGNORE_SET:
                    confidences_result = torch.cat(batch_confidences, dim=0)
                    self.eval_loader.dataset.addConfidenceRecord(confidences_result, beta)
        # Assumed accuracy on the skipped (not sampled) examples.
        precision1 = 100.0
        # Measured accuracy on the sampled examples.
        precision2 = 100 * correct / total
        # Percentage of the test set skipped this round ("ignore ratio").
        ir = (test_example_num - len(indices)) / test_example_num * 100
        # Blend assumed and measured accuracy by their share of the test set.
        fit_precision = (precision1 * (test_example_num - len(indices)) / test_example_num) + (
                precision2 * (len(indices)) / test_example_num)

        return fit_precision, ir