# Federated-learning server: controls the whole training process.
from numpy.core.defchararray import join
from FLClient import FLClient
import pandas as pd 
import numpy as np 
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import torch 
from torch import nn, optim
from dnn_model import Net
from imblearn.over_sampling import RandomOverSampler ,SMOTE, ADASYN,BorderlineSMOTE

import copy


class FLServer():
    # Federated-learning server: owns the global model and drives the rounds.
    def __init__(self,train_dataset_list,test_dataset_list,server_params,client_params):
        """Configure device, global model/optimizer, FL hyper-parameters and clients.

        :param train_dataset_list: one dataset per training client.
        :param test_dataset_list: one dataset per test client.
        :param server_params: optional overrides ("rounds", "ratio").
        :param client_params: forwarded verbatim to every FLClient.
        """
        # Basic setup --------------------------------------------------------
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = Net().to(self.device)
        # SGD keeps no per-parameter state, so server-side optimizer-state
        # aggregation is disabled by default (only matters for Adam-style optimizers).
        self.is_aggregate_optimizer = False
        self.optimizer = torch.optim.SGD(self.model.parameters(),lr=0.01)
        # Number of leading state-dict tensors treated as the shared "meta" layers.
        self.meta_layer = 4

        # Federated-learning hyper-parameters --------------------------------
        # Number of communication rounds.
        self.rounds = server_params["rounds"] if "rounds" in server_params else 200
        # Fraction of clients participating per round.
        self.ratio = server_params["ratio"] if "ratio" in server_params else 1.0
        # Local update steps and inner/outer learning rates (used by meta variants).
        self.update_steps = 5
        self.inner_lr = 0.01
        self.outter_lr = 1
        # One training node / test node per dataset split.
        self.client_size = len(train_dataset_list)
        self.test_size = len(test_dataset_list)

        # Clients -------------------------------------------------------------
        self.train_worker_list = [FLClient(ds,client_params) for ds in train_dataset_list]
        self.test_worker_list = [FLClient(ds,client_params) for ds in test_dataset_list]

        print("FLServer Config:","----device:",self.device,"----round:",self.rounds,"----client_size:",self.client_size,"----test_size:",self.test_size)

    def aggerate_by_weight(self,model_state_dict_list):
        # fed_avg算法
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
        # 使用联邦平均算法进行聚合
        for model_dict in model_state_dict_list:
            for name in new_par:
                new_par[name]+= model_dict[name]*pi
        # 将聚合后的参数加载到模型当中
        self.model.load_state_dict(copy.deepcopy(new_par))

    
    # 尝试自己写梯度下降算法
    def aggerate_by_grad(self,grad_dict_list):
        """
        合并梯度(直接合并后除以参数的数量), 这个和 fedavg 不相同，与fedavg效果相同
        :param grads:
        :return:
        """
      
        # 定义0梯度字典
        new_grad_dict = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(grad_dict_list))
        for name in new_grad_dict:
            new_grad_dict[name] = torch.zeros(new_grad_dict[name].shape).to(self.device)
        
        # 将梯度字典累加，构成最后的梯度
        for grad_dict in grad_dict_list:
            for name in new_grad_dict:
                new_grad_dict[name]+= grad_dict[name]*pi

        # 将聚合后的梯度放到model_state_dict当中。累加之后进行梯度下降。可以在这里尝试使用优化器。optimizer
        model_state_dict = self.model.state_dict()
        for name in new_grad_dict:
            model_state_dict[name]-=new_grad_dict[name]
        # 将聚合后的参数加载到模型当中
        self.model.load_state_dict(model_state_dict)


    # 联邦平均算法聚合模型
    def aggrerate_avg(self,model_state_dict_list):
        # fed_avg算法
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
        # 使用联邦平均算法进行聚合
        for model_dict in model_state_dict_list:
            for name in new_par:
                new_par[name]+= model_dict[name]*pi
        # 将聚合后的参数加载到模型当中
        self.model.load_state_dict(copy.deepcopy(new_par))
    
    # 逆距离聚合
    def aggrerate_ida(self,model_state_dict_list):
        # fed_ida算法
        new_par = copy.deepcopy(self.model.state_dict())
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
        
        # 计算逆距离的权重
        a = torch.zeros(len(model_state_dict_list)).to(self.device)
        for i,model_dict in  enumerate(model_state_dict_list):
            for name in model_dict:
                # 使用f1范数（论文中的方法）
                a[i]+= model_dict[name].norm(1)
        a = 1/a
        a = a /a.sum()
        # 使用你距离聚合算法进行聚合
        for i,model_dict in enumerate(model_state_dict_list):
            for name in new_par:
                new_par[name]+= model_dict[name]*a[i]
        self.model.load_state_dict(copy.deepcopy(new_par))
    
    # meta元模型聚合算法
    def aggrerate_meta(self,model_state_dict_list):
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
        # 使用联邦平均算法进行聚合
        for model_dict in model_state_dict_list:
            for name in model_dict:
                new_par[name]+= model_dict[name]*pi
        # 将聚合后的参数加载到模型当中
        self.model.load_state_dict(copy.deepcopy(new_par))

    # meta元模型聚合算法。仍旧使用权重进行聚合
    def aggrerate_maml(self,model_state_dict_list):
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
        # 使用联邦平均算法进行聚合
        for model_dict in model_state_dict_list:
            for name in model_dict:
                new_par[name]+= model_dict[name]*pi
        # 将聚合后的参数加载到模型当中
        self.model.load_state_dict(copy.deepcopy(new_par))
        # meta元模型聚合算法。仍旧使用权重进行聚合
    
    def aggrerate_maml_by_grad(self,grad_dict_list):
        # 定义0梯度字典
        new_grad_dict = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(grad_dict_list))
        for name in new_grad_dict:
            new_grad_dict[name] = torch.zeros(new_grad_dict[name].shape).to(self.device)
        
        # 将梯度字典累加，构成最后的梯度
        for grad_dict in grad_dict_list:
            for i, name in enumerate(new_grad_dict):
                new_grad_dict[name]+= grad_dict[i]*pi

        # 将聚合后的梯度放到model_state_dict当中。累加之后进行梯度下降。可以在这里尝试使用优化器。optimizer
        # beta=0.1
        model_state_dict = self.model.state_dict()
        for name in new_grad_dict:
            model_state_dict[name]-=new_grad_dict[name]
        # 将聚合后的参数加载到模型当中
        self.model.load_state_dict(model_state_dict)

    def aggregate_reptile(self,model_state_dict_list):
        # 由权重计算梯度，对梯度进行放缩，使用adam优化器进行融合。
        new_grad = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(model_state_dict_list))
        for name in new_grad:
            new_grad[name] = torch.zeros(new_grad[name].shape).to(self.device)
        # 使用联邦平均算法进行聚合
        model_state_dict = self.model.state_dict()
        for model_dict in model_state_dict_list:
            for name in model_state_dict:
                new_grad[name]+= (model_dict[name]-model_state_dict[name])*pi

        # 将梯度加载到模型当中。并进行梯度下降
        named_parameters = self.model.named_parameters()

        self.optimizer.zero_grad()
        for name,param in named_parameters:
            param.grad=new_grad[name]* self.outter_lr
        self.optimizer.step()

    def aggregate_reptile_by_grad(self,grad_dict_list):
        # 由权重计算梯度，对梯度进行放缩，使用adam优化器进行融合。
        new_grad = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(grad_dict_list))
        for name in new_grad:
            new_grad[name] = torch.zeros(new_grad[name].shape).to(self.device)
        # 使用联邦平均算法进行聚合
        for model_dict in grad_dict_list:
            for name in model_dict:
                new_grad[name]+= model_dict[name]*pi

        # 将梯度加载到模型当中。并进行梯度下降
        named_parameters = self.model.named_parameters()

        self.optimizer.zero_grad()
        for name,param in named_parameters:
            param.grad = new_grad[name]* self.outter_lr
        self.optimizer.step()

    # meta元模型聚合算法
    def aggrerate_per(self,model_state_dict_list):
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0/len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
        # 使用联邦平均算法进行聚合
        for model_dict in model_state_dict_list:
            for name in model_dict:
                new_par[name]+= model_dict[name]*pi
        # 将聚合后的参数加载到模型当中
        self.model.load_state_dict(copy.deepcopy(new_par))


    # Aggregate the clients' optimizer state (Adam-style step / exp_avg / exp_avg_sq).
    def aggrerate_optimizer(self,model_state_dict_list,optim_state_dict_list):
        """Average the per-parameter optimizer state across clients and load the
        result into the server optimizer.

        Only meaningful for Adam-like optimizers: the code reads each parameter
        state's ``step``, ``exp_avg`` and ``exp_avg_sq`` entries (SGD keeps no
        such state).  NOTE(review): ``pi`` is computed from
        ``len(model_state_dict_list)``, which is assumed to equal
        ``len(optim_state_dict_list)`` — confirm at the call sites.

        :param model_state_dict_list: used only for its length (the averaging weight).
        :param optim_state_dict_list: per-client ``optimizer.state_dict()`` results.
        """
        # Start from a copy of the first client's state so the dict layout
        # ("state" + "param_groups") is already correct.
        new_opt_par = copy.deepcopy(optim_state_dict_list[0])
        #print(new_opt_par["state"]) # per-layer optimizer state (step, mt, vt) -- a dict
        #print(new_opt_par["param_groups"][0][ 'params'])# indices of each layer's parameters -- a list
        #print(new_opt_par["param_groups"][0])# one param group: indices plus hyper-parameters

        # Zero out every state entry before accumulating.
        for index in new_opt_par["param_groups"][0][ 'params']:

            new_opt_par["state"][index]['step'] = 0
            new_opt_par["state"][index]['exp_avg'] = torch.zeros(new_opt_par["state"][index]['exp_avg'].shape)
            new_opt_par["state"][index]['exp_avg_sq'] = torch.zeros(new_opt_par["state"][index]['exp_avg_sq'].shape)
            #print(new_opt_par["state"][index]) # this layer's zeroed state (step, mt, vt)

        # Equal-weight accumulation; ``index``/``index2`` map the i-th parameter
        # of the aggregate and of client ``idx`` (indices may differ per client).
        for idx, par in enumerate(optim_state_dict_list):
            pi = torch.tensor(1.0/len(model_state_dict_list))
            for i in range(len(new_opt_par["param_groups"][0][ 'params'])):
                index = new_opt_par["param_groups"][0]['params'][i] # index of the i-th parameter (aggregate)
                index2 = par["param_groups"][0]['params'][i] # index of the i-th parameter (this client)
                new_opt_par["state"][index]['step'] += par["state"][index2]['step'] * pi 
                new_opt_par["state"][index]['exp_avg'] += par["state"][index2]['exp_avg']* pi 
                new_opt_par["state"][index]['exp_avg_sq'] += par["state"][index2]['exp_avg_sq']* pi 

        #print(new_opt_par["state"]) # the averaged per-layer optimizer state
        self.optimizer.load_state_dict(copy.deepcopy(new_opt_par))

    # The whole training process: broadcast, local training, aggregation, evaluation.
    def start_federated(self):
        """Run ``self.rounds`` rounds of federated training with FedAvg aggregation."""
        for rnd in range(self.rounds):
            results_model = []
            results_optim = []

            # Broadcast: every training client gets a fresh copy of the global state.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
            # Local training on every client.
            for worker in self.train_worker_list:
                worker.train()

            # Collect the locally trained parameters and optimizer states.
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())
                results_optim.append(worker.get_optim_state_dict())

            # Aggregate the model weights.  (Bug fix: ``aggrerate_avg`` takes only
            # the model state dicts; also passing ``results_optim`` raised a
            # TypeError.  Optimizer state is aggregated separately below.)
            self.aggrerate_avg(results_model)

            if(self.is_aggregate_optimizer):
                self.aggrerate_optimizer(results_model,results_optim)

            # Validate: average the test metric over all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test()/len(self.test_worker_list)
            print(rnd,result_test)
    
   # 定义了训练的整个过程。
    def start_federated_avg(self):
        # 客户端进行训练 迭代以下过程
        for round in range(self.rounds):
            results_model = []
            results_optim = []

            # 发送模型。为每一个客户端发送一组新的模型复制
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
            # 训练模型
            for worker in self.train_worker_list:
                worker.train()
                # 每个模型训练完成后进行本地测试
                # worker.test()
            
            # 返回训练参数
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())
                results_optim.append(worker.get_optim_state_dict())
            
            # 聚合训练参数。聚合策略的选择
            self.aggrerate_avg(results_model,results_optim)

            if(self.is_aggregate_optimizer):
                self.aggrerate_optimizer(results_model,results_optim)

            
            # 对模型进行验证
            result_test=0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test()/len(self.test_worker_list)
            print(round,result_test)
    def start_federated_ida(self):
        """Run ``self.rounds`` rounds of federated training with inverse-distance
        (IDA) aggregation."""
        for rnd in range(self.rounds):
            results_model = []
            results_optim = []

            # Broadcast: every training client gets a fresh copy of the global state.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
            # Local training on every client.
            for worker in self.train_worker_list:
                worker.train()

            # Collect the locally trained parameters and optimizer states.
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())
                results_optim.append(worker.get_optim_state_dict())

            # Aggregate with IDA.  (Bug fix: ``aggrerate_ida`` takes only the
            # model state dicts; also passing ``results_optim`` raised a TypeError.)
            self.aggrerate_ida(results_model)

            if(self.is_aggregate_optimizer):
                self.aggrerate_optimizer(results_model,results_optim)

            # Validate: average the test metric over all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test()/len(self.test_worker_list)
            print(rnd,result_test)

    def start_federated_prox(self):
        """Run ``self.rounds`` rounds of FedProx training (clients use the
        proximal local objective via ``train_prox``); FedAvg aggregation."""
        for rnd in range(self.rounds):
            results_model = []
            results_optim = []

            # Broadcast: every training client gets a fresh copy of the global state.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
            # Local proximal training on every client.
            for worker in self.train_worker_list:
                worker.train_prox()

            # Collect the locally trained parameters and optimizer states.
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())
                results_optim.append(worker.get_optim_state_dict())

            # Aggregate the model weights.  (Bug fix: ``aggrerate_avg`` takes only
            # the model state dicts; also passing ``results_optim`` raised a TypeError.)
            self.aggrerate_avg(results_model)

            if(self.is_aggregate_optimizer):
                self.aggrerate_optimizer(results_model,results_optim)

            # Validate: average the test metric over all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test()/len(self.test_worker_list)
            print(rnd,result_test)




    def start_federated_meta(self):
        """Run ``self.rounds`` rounds of meta federated training: only the first
        ``self.meta_layer`` tensors of the state dict are shared with clients."""
        for rnd in range(self.rounds):
            results_model = []
            results_optim = []
            running_accuracy = []
            # Broadcast only the shared "meta" slice of the global state dict.
            state_dict = self.model.state_dict()
            meta_dict = {key:state_dict[key] for i,key in enumerate(state_dict) if i < self.meta_layer}
            for worker in self.train_worker_list:
                worker.load_model_meta(copy.deepcopy(meta_dict),copy.deepcopy(self.optimizer.state_dict()))
            # Local training; keep each client's reported accuracy.
            for worker in self.train_worker_list:
                running_accuracy.append(worker.train_meta())

            # Collect the trained meta parameters and optimizer states.
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict_meta())
                results_optim.append(worker.get_optim_state_dict())

            # Aggregate the shared layers.  (Bug fix: ``aggrerate_meta`` takes only
            # the model state dicts; also passing ``results_optim`` raised a TypeError.)
            self.aggrerate_meta(results_model)

            if(self.is_aggregate_optimizer):
                self.aggrerate_optimizer(results_model,results_optim)

            # Validate: average the test metric over all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test_meta()/len(self.test_worker_list)
            print(rnd,result_test,"----",running_accuracy)


    def start_federated_maml(self):
        """MAML-style federated training: clients adapt locally, the server
        averages the adapted weights each round and reports test performance."""
        for rnd in range(self.rounds):
            # Broadcast a private copy of the global model/optimizer to each client.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))

            # Local MAML training; keep the reported accuracies.
            running_accuracy = [worker.train_maml() for worker in self.train_worker_list]

            # Gather the adapted weights and fold them into the global model.
            results_model = [worker.get_model_state_dict() for worker in self.train_worker_list]
            self.aggrerate_maml(results_model)

            # Evaluate the aggregated model across all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test_maml()/len(self.test_worker_list)
            print(rnd,result_test)



    def start_federated_maml_by_grad(self):
        """MAML-style federated training where clients return meta-gradients
        instead of weights; the server averages and applies them each round."""
        for rnd in range(self.rounds):
            # Broadcast a private copy of the global model/optimizer to each client.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))

            # Local MAML training; keep the reported accuracies.
            running_accuracy = [worker.train_maml_by_grad() for worker in self.train_worker_list]

            # Gather the clients' gradient dicts and apply the aggregated step.
            grad_dict_list = [worker.get_grad_dict() for worker in self.train_worker_list]
            self.aggrerate_maml_by_grad(grad_dict_list)

            # Evaluate the updated model across all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test_maml()/len(self.test_worker_list)
            print(rnd,result_test)
    

    def start_federated_reptile(self):
        """Reptile federated training: clients adapt locally, the server takes a
        meta-step toward the averaged adapted weights each round."""
        for rnd in range(self.rounds):
            # Broadcast a private copy of the global model/optimizer to each client.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))

            # Local Reptile training; keep the reported accuracies.
            running_accuracy = [worker.train_reptile() for worker in self.train_worker_list]

            # Gather the adapted weights and apply the Reptile meta-update.
            results_model = [worker.get_model_state_dict() for worker in self.train_worker_list]
            self.aggregate_reptile(results_model)

            # Evaluate the updated model across all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test_maml()/len(self.test_worker_list)
            print(rnd,result_test)

    def start_federated_reptile_by_grad(self):
        """Reptile federated training where clients return gradient dicts; the
        server averages them and steps via its optimizer each round."""
        for rnd in range(self.rounds):
            # Broadcast a private copy of the global model/optimizer to each client.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))

            # Local training; keep the reported accuracies.
            running_accuracy = [worker.train_reptile_by_grad() for worker in self.train_worker_list]

            # Gather the clients' gradient dicts and apply the meta-update.
            results_model = [worker.get_grad_dict() for worker in self.train_worker_list]
            self.aggregate_reptile_by_grad(results_model)

            # Evaluate the updated model across all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test_maml()/len(self.test_worker_list)
            print(rnd,result_test)


    def start_federated_per(self):
        """Personalised federated training: only the first ``self.meta_layer``
        tensors are shared with clients; aggregation averages the shared part."""
        for rnd in range(self.rounds):
            # Broadcast only the shared "meta" slice of the global state dict.
            state_dict = self.model.state_dict()
            meta_dict = {key: state_dict[key] for i,key in enumerate(state_dict) if i < self.meta_layer}
            for worker in self.train_worker_list:
                worker.load_model_meta(copy.deepcopy(meta_dict),copy.deepcopy(self.optimizer.state_dict()))

            # Local personalised training; keep the reported accuracies.
            running_accuracy = [worker.train_per() for worker in self.train_worker_list]

            # Gather the shared layers and aggregate them.
            results_model = [worker.get_model_state_dict_meta() for worker in self.train_worker_list]
            self.aggrerate_per(results_model)

            # Evaluate across all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test_meta()/len(self.test_worker_list)
            print(rnd,result_test,"----",running_accuracy)


# Loads configuration parameters and performs one training run.  Kept as a free
# function because driving training through the class allows its parameters to
# be mutated mid-run; to be used once the configuration handling is split out.
def start_federated_learning(data_params,server_params,client_params):
    """Placeholder: build an ``FLServer`` from the given parameter dicts and run
    one federated training session.  Not implemented yet."""
    pass


if __name__=="__main__":
    from data_load import DataManager
    dm = DataManager()
    dm.load_data()
    server = FLServer()
    server.start_federated(dm.allocate_data_avg())
