# Server that orchestrates the federated training process (SCAFFOLD variant).
import copy
import torch 
from trainers.fedbase.FedServerBase import FedServerBase
import logging

# logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.basicConfig(format ='%(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)


class FedScaffoldServer(FedServerBase):
    """Federated-learning server implementing the SCAFFOLD algorithm.

    On top of FedAvg-style uniform weight averaging, the server maintains
    one control variate per model parameter; each round the clients'
    delta-controls are averaged into this server-side state.
    """

    def __init__(self, worker_list, model, server_params, client_params):
        """Initialize the server and its federated-learning state.

        Args:
            worker_list (tuple(train_worker_list, test_worker_list)):
                the training-client list and the test-client list.
            model (torch.nn.Module): the global model to be trained.
            server_params (dict): federation-level (server) parameters.
            client_params (dict): per-round (client) training parameters.
        """
        super(FedScaffoldServer, self).__init__(worker_list, model, server_params, client_params)

        # SCAFFOLD state: one zero-initialized control variate per model
        # parameter (zeros_like keeps shape, dtype and device consistent).
        self.server_controls = [torch.zeros_like(param) for param in self.model.parameters()]
        logger.info("FedScaffoldServer init----")

    def aggregate_by_weight(self, model_state_dict_list):
        """Aggregate client models into the global model (uniform FedAvg).

        Args:
            model_state_dict_list (list[dict]): one ``state_dict`` per client.
        """
        if not model_state_dict_list:
            # No client reports this round: keep the current global model.
            return
        weight = 1.0 / len(model_state_dict_list)
        new_par = copy.deepcopy(self.model.state_dict())
        for name in new_par:
            # zeros_like keeps the entry's dtype/device; the previous
            # torch.zeros(shape) silently dropped the dtype.
            acc = torch.zeros_like(new_par[name], dtype=torch.float32, device=self.device)
            for model_dict in model_state_dict_list:
                acc += model_dict[name] * weight
            new_par[name] = acc
        # load_state_dict copies tensors into the model itself, so no
        # extra deepcopy of new_par is needed here.
        self.model.load_state_dict(new_par)

    def aggregate_controls(self, controls_list):
        """Average the clients' delta-controls into the server controls.

        Args:
            controls_list (list[list[torch.Tensor]]): per-client lists of
                delta-control tensors, aligned with ``self.server_controls``.
        """
        if not controls_list:
            # No client reports: leave the server controls unchanged
            # instead of zeroing them out.
            return
        size = len(controls_list)
        # Reset the accumulators in place.
        for control in self.server_controls:
            control.zero_()
        # Sum each client's contribution, pre-scaled by 1/size.
        for client_controls in controls_list:
            for server_control, client_control in zip(self.server_controls, client_controls):
                server_control += client_control / size

    def start_federated(self):
        """Run ``self.rounds`` rounds of SCAFFOLD federated training.

        Each round: broadcast the global model and server controls, run
        local training on every training worker, aggregate the returned
        weights (FedAvg) and delta-controls (SCAFFOLD), then evaluate on
        the test workers and record the round's metrics.
        """
        # `round_idx` avoids shadowing the builtin `round`.
        for round_idx in range(self.rounds):
            results_model = []
            results_controls = []
            running_accuracy = []

            # Broadcast: each training client gets its own copy of the
            # current global model and of the server controls.
            for worker in self.train_worker_list:
                worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))
                worker.load_controls(copy.deepcopy(self.server_controls))

            # Local training; each worker returns (loss, accuracy) —
            # presumably, judging by the indexing below. TODO confirm.
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train())

            # Collect trained weights and delta-controls from every client.
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())
                results_controls.append(worker.get_controls())

            # Aggregate weights and control variates into the server state.
            self.aggregate_by_weight(results_model)
            self.aggregate_controls(results_controls)

            # Evaluate the new global model on every test worker.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))
                result_test += worker.test() / len(self.test_worker_list)

            # Bookkeeping: information matrix (skipped on the first round,
            # since it needs the previous test accuracy), then metrics.
            if round_idx > 0:
                # NOTE(review): "imformation" is a typo in the base-class
                # method name; kept as-is to match the inherited API.
                self.calculate_imformation_increase(results_model, self.test_accuracy[-1], result_test)

            self.test_accuracy.append(result_test)
            running_state = torch.mean(torch.tensor(running_accuracy), dim=0)
            self.train_loss.append(running_state[0].item())
            self.train_accuracy.append(running_state[1].item())
            # Snapshot the current parameters as the "old" model.
            self.old_model_parameters = copy.deepcopy(list(self.model.parameters()))
            # Lazy %-args: the message is only formatted if INFO is enabled.
            logger.info(
                "FedScaffold start_federated:round=%d,test_acc=%.2f,train_loss=%.2f,train_acc=%.2f",
                round_idx, result_test, running_state[0].item(), running_state[1].item(),
            )
