# Server that controls the training process
import copy
import torch 
import logging
from trainers.fedbase.FedServerBase import FedServerBase

# logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.basicConfig(format ='%(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)

class FedAvgServer(FedServerBase):
    """Federated-averaging (FedAvg) server that orchestrates training rounds
    across a set of training workers and evaluates on a set of test workers."""

    def __init__(self, worker_list, model, server_params, client_params):
        """Initialize the server and configure its parameters.

        1. Server-level settings: device.
        2. Deep-learning settings: model, optimizer.
        3. Federated-learning settings: rounds, ratio, inner_lr, outer_lr.
        4. Client setup: pushes parameters to the clients and prepares their data.

        Args:
            worker_list (tuple(train_worker_list, test_worker_list)): the list of
                training clients and the list of testing clients.
            model (torch.nn.Module): the global model shared across clients.
            server_params (dict): federated-learning parameters for the server.
            client_params (dict): per-training-run parameters for the clients.
        """
        super(FedAvgServer, self).__init__(worker_list, model, server_params, client_params)

        logger.info("FedAvgServer init:")

    def aggregate_by_weight(self, model_state_dict_list):
        """Aggregate client parameters by weight (plain FedAvg: uniform weights).

        Args:
            model_state_dict_list (list[dict[str, torch.Tensor]]): one
                ``state_dict`` per participating client.

        Raises:
            ValueError: if ``model_state_dict_list`` is empty.
        """
        if not model_state_dict_list:
            # Previously this surfaced as an opaque ZeroDivisionError; fail
            # loudly with a clear message instead.
            raise ValueError("model_state_dict_list must not be empty")
        weight = 1.0 / len(model_state_dict_list)  # uniform FedAvg weight
        # Accumulate into zero tensors shaped like the server model's entries;
        # no need to deep-copy the whole state dict just for its shapes.
        aggregated = {
            name: torch.zeros(tensor.shape, device=self.device)
            for name, tensor in self.model.state_dict().items()
        }
        # Federated averaging: weighted sum over all client states.
        for client_state in model_state_dict_list:
            for name in aggregated:
                aggregated[name] += client_state[name] * weight
        # load_state_dict copies values into the model's own tensors, so a
        # defensive deepcopy of the aggregated dict is unnecessary.
        self.model.load_state_dict(aggregated)

    def start_federated(self):
        """Run the federated-training loop.

        May be called repeatedly to continue an earlier run, but changing the
        parameters between calls is not supported. Each round:
        1. Clients load the current global model.
        2. Clients train and report loss and accuracy.
        3. Clients return their model parameters.
        4. The server aggregates the client parameters.
        5. The aggregated model is evaluated and the results logged/stored.
        """
        for round_idx in range(self.rounds):  # round_idx: avoid shadowing builtin round()
            client_states = []
            running_accuracy = []

            # Broadcast: every client receives its own copy of the global weights.
            global_state = self.model.state_dict()
            for worker in self.train_worker_list:
                worker.load_model_state_dict(copy.deepcopy(global_state))
            # Local training; each worker is expected to return (loss, accuracy)
            # (see the metric indexing below).
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train())

            # Collect the locally trained parameters.
            for worker in self.train_worker_list:
                client_states.append(worker.get_model_state_dict())

            # Aggregate client parameters (aggregation strategy is pluggable).
            self.aggregate_by_weight(client_states)

            # Evaluate the aggregated model, averaging over the test workers.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))
                result_test += worker.test() / len(self.test_worker_list)

            # Bookkeeping: information matrix, metric history, and a snapshot of
            # the current model parameters as the "old" parameters.
            if round_idx > 0:
                # NOTE(review): the "imformation" typo comes from the base-class
                # API and cannot be fixed here without breaking callers.
                self.calculate_imformation_increase(client_states, self.test_accuracy[-1], result_test)

            self.test_accuracy.append(result_test)
            running_state = torch.mean(torch.tensor(running_accuracy), dim=0)
            self.train_loss.append(running_state[0].item())
            self.train_accuracy.append(running_state[1].item())
            self.old_model_parameters = copy.deepcopy(list(self.model.parameters()))
            logger.info(
                "FedAvgServer start_federated:round={},test_acc={:.2f},train_loss={:.2f},train_acc={:.2f}".format(
                    round_idx, result_test, running_state[0].item(), running_state[1].item()
                )
            )