# Server: controls the federated training process
import copy
import torch 
from trainers.fedbase.FedServerBase import FedServerBase
import logging

# Plain-message log output; for timestamped, per-module output use:
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.basicConfig(format="%(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)



class FedMetaServer(FedServerBase):
    """Server that drives federated meta-learning training.

    On top of the generic ``FedServerBase`` round loop it implements several
    training schemes:

    * ``start_federated_with_personality_layer`` / ``start_federated_with_layer_adjust``:
      only the first ``base_layer`` state-dict entries are shared (FedPer style).
    * ``start_federated_with_multi_maml``: multi-step MAML with FedAvg aggregation.
    * ``start_federated_with_multi_reptile``: Reptile-style server update.
    * ``start_federated_with_reptile_adjust``: Reptile weighted by historical
      client contribution, with SCAFFOLD-style control variates.
    """

    def __init__(self, worker_list, model, server_params, client_params):
        """Initialise the server.

        Args:
            worker_list: workers managed by the base class.
            model: the global model.
            server_params (dict): server configuration; may contain
                ``"base_layer"`` -- the number of leading state-dict entries
                shared between clients (default: 4).
            client_params: client-side configuration (handled by the base class).
        """
        super(FedMetaServer, self).__init__(worker_list, model, server_params, client_params)
        # Number of leading (shared) layers; the remaining layers stay
        # personal to each client.
        self.base_layer = server_params.get("base_layer", 4)
        # SCAFFOLD-style control variates, one zero tensor per model parameter.
        self.server_controls = [torch.zeros_like(param) for param in self.model.parameters()]
        # Fixed log message: it previously named the wrong class ("FedPerServer").
        logger.info("FedMetaServer init:base_layer={}".format(self.base_layer))

    def calculate_imformation_increase(self, model_state_dict_list, old_accuracy, new_accuracy):
        """Estimate each client's contribution ("information gain") this round.

        1. Project every client's parameter delta onto the unit direction of
           the aggregated (global) model delta: a = <x, y> / |y|.
        2. Normalise the projections across clients to percentage shares.
        3. Weight the shares by this round's test-accuracy change.
        4. Accumulate the result into ``self.information_increase_matrix``.

        Only the flattened entries covered by the (possibly partial) client
        state dicts participate, so this also works when clients upload just
        the base layers.

        Args:
            model_state_dict_list (list[dict]): client-uploaded state dicts.
            old_accuracy (float): test accuracy before this round.
            new_accuracy (float): test accuracy after this round.

        Returns:
            torch.Tensor: this round's contribution vector (one entry per client).
        """
        with torch.no_grad():
            # Clients may upload only the base layers; restrict the comparison
            # to that many flattened parameter entries.
            parameters_number = len(self.get_tensor_vector_from_dict(model_state_dict_list[0]))
            old_vector = self.get_tensor_vector_from_list(self.old_model_parameters)[:parameters_number]
            new_vector = self.get_tensor_vector_from_list(self.model.parameters())[:parameters_number]
            delta_vector = new_vector - old_vector
            # Unit vector of the global update direction.
            delta_direction = delta_vector / torch.norm(delta_vector, 2)

            self.information_increase_list.append(torch.norm(delta_vector, 2).item())

            information = torch.zeros([len(model_state_dict_list)])
            for i, model_state_dict in enumerate(model_state_dict_list):
                client_vector = self.get_tensor_vector_from_dict(model_state_dict)
                # Scalar projection of the i-th client's delta onto the
                # global update direction.
                information[i] = (delta_direction * (client_vector - old_vector)).sum()

            # Normalise across clients and weight by the accuracy change.
            information = information / information.sum() * (new_accuracy - old_accuracy)
            self.information_increase_matrix += information
        # Contribution vector for the current round.
        return information

    # Correctly spelled, backward-compatible alias for the method above.
    calculate_information_increase = calculate_imformation_increase

    def _federated_average(self, model_state_dict_list):
        """Equal-weight FedAvg of the uploaded state dicts into ``self.model``.

        NOTE(review): every entry of the server state dict is zeroed first,
        so keys absent from the client uploads (e.g. personality layers) are
        loaded back as zeros -- confirm this is intended.
        """
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0 / len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
        # Accumulate the uniformly weighted client parameters.
        for model_dict in model_state_dict_list:
            for name in model_dict:
                new_par[name] += model_dict[name] * pi
        self.model.load_state_dict(copy.deepcopy(new_par))

    def aggregate_meta_with_personality_layer(self, model_state_dict_list):
        """Aggregate base-layer uploads with plain equal-weight FedAvg."""
        self._federated_average(model_state_dict_list)

    def aggregate_meta_with_multi_maml(self, model_state_dict_list):
        """Aggregate multi-step MAML uploads (plain FedAvg; deduplicated --
        this was a byte-for-byte copy of ``aggregate_meta_with_personality_layer``)."""
        self._federated_average(model_state_dict_list)

    def _apply_weighted_delta(self, model_state_dict_list, weights):
        """Reptile-style server step: apply the weighted sum of client deltas
        (client weights minus current global weights) scaled by ``self.outer_lr``.

        Args:
            model_state_dict_list (list[dict]): client-uploaded state dicts.
            weights: per-client aggregation weights, indexable by client position.
        """
        model_state_dict = self.model.state_dict()
        delta_model_state_dict = copy.deepcopy(model_state_dict)
        for name in delta_model_state_dict:
            delta_model_state_dict[name] = torch.zeros(delta_model_state_dict[name].shape).to(self.device)
        for i, model_dict in enumerate(model_state_dict_list):
            for name in model_dict:
                delta_model_state_dict[name] += (model_dict[name] - model_state_dict[name]) * weights[i]
        # Plain SGD-style server step; a server-side Adam optimizer could be
        # plugged in here later.
        new_model_state_dict = copy.deepcopy(self.model.state_dict())
        for name in new_model_state_dict:
            new_model_state_dict[name] = new_model_state_dict[name] + delta_model_state_dict[name] * self.outer_lr
        self.model.load_state_dict(copy.deepcopy(new_model_state_dict))

    def aggregate_meta_with_multi_reptile(self, model_state_dict_list):
        """Equal-weight Reptile aggregation."""
        pi = torch.tensor(1.0 / len(model_state_dict_list))
        self._apply_weighted_delta(model_state_dict_list, [pi] * len(model_state_dict_list))

    def aggregate_meta_with_reptile_adjust(self, model_state_dict_list):
        """Reptile aggregation weighted by each client's historical contribution."""
        client_weight = self.calculate_client_weight()
        self._apply_weighted_delta(model_state_dict_list, client_weight)

    def aggregate_controls(self, controls_list):
        """Average the clients' control deltas into ``self.server_controls``.

        Args:
            controls_list (list): per-client lists of control tensors, each
                aligned with ``self.server_controls``.
        """
        size = len(controls_list)
        # Reset the accumulated controls in place.
        for control in self.server_controls:
            control.zero_()
        # Accumulate the mean of the client controls.
        for controls in controls_list:
            for i in range(len(self.server_controls)):
                self.server_controls[i] += controls[i] / size

    def _broadcast_full_model(self):
        """Send a deep copy of the full global state dict to every training worker."""
        for worker in self.train_worker_list:
            worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))

    def _broadcast_base_layers(self):
        """Send only the first ``base_layer`` state-dict entries to every
        training worker (the remaining layers stay personal)."""
        state_dict = self.model.state_dict()
        state_dict_base_layer = {key: state_dict[key] for i, key in enumerate(state_dict) if i < self.base_layer}
        for worker in self.train_worker_list:
            worker.load_model_state_dict_base_layer(copy.deepcopy(state_dict_base_layer))

    def _evaluate_on_test_workers(self, test_method_name):
        """Load the current global model into every test worker and return the
        mean result of calling ``worker.<test_method_name>()``."""
        result_test = 0
        for worker in self.test_worker_list:
            worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))
            result_test += getattr(worker, test_method_name)() / len(self.test_worker_list)
        return result_test

    def _finalize_round(self, phase, round_idx, results_model, running_accuracy, result_test):
        """Record this round's metrics and log progress.

        Updates the contribution matrix (from round 1 on, since it needs a
        previous test accuracy), appends test accuracy / train loss / train
        accuracy, and snapshots the current parameters for the next round's
        contribution calculation.
        """
        if round_idx > 0:
            self.calculate_imformation_increase(results_model, self.test_accuracy[-1], result_test)
        self.test_accuracy.append(result_test)
        # Column 0 is the mean training loss, column 1 the mean training accuracy.
        running_state = torch.mean(torch.tensor(running_accuracy), dim=0)
        self.train_loss.append(running_state[0].item())
        self.train_accuracy.append(running_state[1].item())
        self.old_model_parameters = copy.deepcopy(list(self.model.parameters()))
        logger.info("FedMetaServer {}:round={},test_acc={:.2f},train_loss={:.2f},train_acc={:.2f}".format(
            phase, round_idx, result_test, running_state[0].item(), running_state[1].item()))

    def start_federated_with_personality_layer(self):
        """Federated meta training that shares only the base layers (FedPer style)."""
        for round_idx in range(self.rounds):
            running_accuracy = []
            # Broadcast the shared layers, then train locally on every worker.
            self._broadcast_base_layers()
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train_meta_with_personality_layer())
            # Collect the trained base layers.
            results_model = [worker.get_model_state_dict_base_layer() for worker in self.train_worker_list]
            # Aggregate and evaluate.
            self.aggregate_meta_with_personality_layer(results_model)
            result_test = self._evaluate_on_test_workers("test_meta_with_personality_layer")
            self._finalize_round("start_federated_with_personality_layer",
                                 round_idx, results_model, running_accuracy, result_test)

    def start_federated_with_multi_maml(self):
        """Federated MAML with multiple inner gradient steps per client."""
        for round_idx in range(self.rounds):
            running_accuracy = []
            self._broadcast_full_model()
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train_meta_with_multi_maml())
            results_model = [worker.get_model_state_dict() for worker in self.train_worker_list]
            self.aggregate_meta_with_multi_maml(results_model)
            result_test = self._evaluate_on_test_workers("test_meta_with_multi_maml")
            self._finalize_round("start_federated_with_multi_maml",
                                 round_idx, results_model, running_accuracy, result_test)

    def start_federated_with_multi_reptile(self):
        """Federated Reptile with multiple inner gradient steps per client."""
        for round_idx in range(self.rounds):
            running_accuracy = []
            self._broadcast_full_model()
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train_meta_with_multi_reptile())
            results_model = [worker.get_model_state_dict() for worker in self.train_worker_list]
            self.aggregate_meta_with_multi_reptile(results_model)
            result_test = self._evaluate_on_test_workers("test_meta_with_multi_reptile")
            self._finalize_round("start_federated_with_multi_reptile",
                                 round_idx, results_model, running_accuracy, result_test)

    def start_federated_with_reptile_adjust(self):
        """Contribution-weighted Reptile with SCAFFOLD-style control variates."""
        for round_idx in range(self.rounds):
            running_accuracy = []
            # Broadcast the global model and the server controls.
            for worker in self.train_worker_list:
                worker.load_model_state_dict(copy.deepcopy(self.model.state_dict()))
                worker.load_controls(copy.deepcopy(self.server_controls))
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train_meta_with_reptile_adjust())
            # Collect both the trained models and the updated controls.
            results_model = []
            results_controls = []
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())
                results_controls.append(worker.get_controls())
            self.aggregate_meta_with_reptile_adjust(results_model)
            self.aggregate_controls(results_controls)
            result_test = self._evaluate_on_test_workers("test_meta_with_reptile_adjust")
            # Log phase fixed: previously logged the generic "start_federated".
            self._finalize_round("start_federated_with_reptile_adjust",
                                 round_idx, results_model, running_accuracy, result_test)

    def start_federated_with_layer_adjust(self):
        """Base-layer sharing (as in ``start_federated_with_personality_layer``)
        combined with the base class's ``aggregate_meta`` aggregation."""
        for round_idx in range(self.rounds):
            running_accuracy = []
            self._broadcast_base_layers()
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train_meta())
            results_model = [worker.get_model_state_dict_base_layer() for worker in self.train_worker_list]
            # ``aggregate_meta`` is provided by the base class.
            self.aggregate_meta(results_model)
            result_test = self._evaluate_on_test_workers("test_meta")
            # Log phase fixed: previously logged the generic "start_federated".
            self._finalize_round("start_federated_with_layer_adjust",
                                 round_idx, results_model, running_accuracy, result_test)