import torch 
from trainers.fedbase.FedServerBase import FedServerBase
import logging
import copy
# Root-logger setup: attach a default stream handler and emit INFO and above.
# (A format such as '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# could be passed to basicConfig; currently the default format is used.)
# NOTE(review): consider logging.getLogger(__name__) instead of the root
# logger so this module's level does not affect the whole process — confirm
# no other module relies on the root logger being set to INFO here.
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)


class FedIdaServer(FedServerBase):
    """Federated-learning server using Inverse Distance Aggregation (IDA).

    Client updates are merged with weights proportional to the inverse of how
    far each client's parameters drifted from the current global model, so
    clients with small updates count more and large (outlier) updates count
    less.
    """

    def __init__(self, worker_list, model, server_params, client_params):
        """Delegate all setup to FedServerBase.

        Args:
            worker_list: client workers; the base class is expected to split
                them into self.train_worker_list / self.test_worker_list
                (used below) — confirm in FedServerBase.
            model: global model exposing state_dict() / parameters().
            server_params: server-side hyper-parameters (presumably the
                source of self.rounds and self.device — verify in the base).
            client_params: client-side hyper-parameters.
        """
        super(FedIdaServer, self).__init__(worker_list, model, server_params, client_params)
        print("FedIdaServer init----")

    def aggregate_by_weight(self, model_state_dict_list):
        """Aggregate client models with inverse-distance (IDA) weights.

        1. For each client, sum the L1 norms of (client - global) over all
           parameters to obtain its update magnitude.
        2. Invert the magnitudes and normalize them to sum to 1; use them as
           aggregation weights. Small updates therefore receive large
           weights: for distances 2 and 8 the weights are
           (1/2)/(1/2 + 1/8) = 0.8 and (1/8)/(1/2 + 1/8) = 0.2, and the
           aggregate is 0.8*2 + 0.2*8 = 3.2.

        Args:
            model_state_dict_list (list[dict[str, torch.Tensor]]): one state
                dict per client, with the same keys as the global model.
        """
        # Zero-filled accumulator with the same keys as the global model.
        new_par = copy.deepcopy(self.model.state_dict())
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)

        # Per-client update magnitude: L1 distance to the current model
        # (the norm used in the IDA paper).
        distances = torch.zeros(len(model_state_dict_list)).to(self.device)
        old_state_dict = self.model.state_dict()
        for i, model_dict in enumerate(model_state_dict_list):
            for name in model_dict:
                distances[i] += torch.norm(model_dict[name] - old_state_dict[name], 1)

        # Guard against a zero distance (a client identical to the global
        # model) which would otherwise produce an inf weight and NaNs after
        # normalization.
        weights = 1 / distances.clamp_min(1e-12)
        weights = weights / weights.sum()

        # Weighted sum of the client parameters under the IDA weights.
        for i, model_dict in enumerate(model_state_dict_list):
            for name in new_par:
                new_par[name] += model_dict[name] * weights[i]
        self.model.load_state_dict(copy.deepcopy(new_par))

    def start_federated(self):
        """Run self.rounds federated rounds: broadcast the global model,
        train locally, IDA-aggregate, evaluate, and record metrics."""
        # round_idx (not "round") to avoid shadowing the builtin.
        for round_idx in range(self.rounds):
            results_model = []
            running_accuracy = []

            # Broadcast: each training client receives its own copy of the
            # current global weights.
            for worker in self.train_worker_list:
                worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))

            # Local training. worker.train() appears to return a
            # (loss, accuracy)-like pair — indices 0/1 are read below;
            # confirm against the worker implementation.
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train())

            # Collect the locally trained parameters.
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())

            # Aggregate with the inverse-distance strategy.
            self.aggregate_by_weight(results_model)

            # Evaluate the new global model: mean accuracy over test workers.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))
                result_test += worker.test() / len(self.test_worker_list)

            # Bookkeeping: information-increase matrix (base-class helper;
            # note the historical spelling "imformation" must be kept),
            # accuracy/loss history, and a snapshot of the new parameters.
            if round_idx > 0:
                self.calculate_imformation_increase(results_model, self.test_accuracy[-1], result_test)

            self.test_accuracy.append(result_test)
            running_state = torch.tensor(running_accuracy)
            running_state = torch.mean(running_state, dim=0)
            self.train_loss.append(running_state[0].item())
            self.train_accuracy.append(running_state[1].item())
            self.old_model_parameters = copy.deepcopy(list(self.model.parameters()))
            logger.info("FedIdaServer start_federated:round={},test_acc={:.2f},train_loss={:.2f},train_acc={:.2f}".format(round_idx, result_test, running_state[0].item(), running_state[1].item()))