# Server that controls the federated training process.
from numpy.core.defchararray import join
from FLClient import FLClient
import pandas as pd 
import numpy as np 
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import torch 
from torch import nn, optim
from dnn_model import Net
from imblearn.over_sampling import RandomOverSampler ,SMOTE, ADASYN,BorderlineSMOTE

import copy

class FLServer():
    """Federated-learning coordinator.

    Holds the global model, broadcasts it to the training clients each round,
    aggregates the returned parameters (FedAvg / inverse-distance / prox),
    optionally aggregates optimizer state, and evaluates the aggregated model
    on the test clients.
    """

    def __init__(self, train_dataset_list, test_dataset_list):
        """Configure the federated run.

        Args:
            train_dataset_list: one dataset per training client.
            test_dataset_list: one dataset per evaluation client.
        """
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Global model shared with every client each round.
        self.model = Net().to(self.device)
        # Whether per-parameter optimizer state is also aggregated; only
        # meaningful for stateful optimizers (Adam), not plain SGD.
        self.is_aggregate_optimizer = False
        # Adam optimizer, whose state would require aggregation:
        # self.optimizer = optim.Adam(self.model.parameters(),lr=0.001)
        # Plain SGD: stateless, no optimizer aggregation needed.
        self.optimizer = torch.optim.SGD(self.model.parameters(),lr=0.01)
        # Adam with L2 regularization:
        # self.optimizer = optim.Adam(self.model.parameters(),lr=0.001,weight_decay=0.000001)
        # SGD with L2 regularization:
        # self.optimizer = torch.optim.SGD(self.model.parameters(),lr=0.01,weight_decay=0.000001)

        # Aggregation strategy: 0 = FedAvg, 1 = IDA (inverse distance), 2 = prox.
        self.aggrerate_strategy = 0

        # Number of federated rounds.
        self.rounds = 200
        # One training client per training dataset; likewise for test clients.
        self.client_size = len(train_dataset_list)
        self.test_size = len(test_dataset_list)

        # Build the client pools from the dataset partitions.
        self.train_worker_list = [FLClient(train_dataset) for train_dataset in train_dataset_list]
        self.test_worker_list = [FLClient(test_dataset) for test_dataset in test_dataset_list]

        print("FLServer Config:","----device:",self.device,"----round:",self.rounds,"----client_size:",self.client_size,"----test_size:",self.test_size)

    def aggrerate_avg(self, model_state_dict_list, optim_state_dict_list):
        """FedAvg: replace the global model with the uniform average of the client models.

        `optim_state_dict_list` is unused here; it is accepted so every
        aggregation strategy shares one call signature.
        """
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0 / len(model_state_dict_list))
        # Zero the accumulators on the model's device.
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)

        for model_dict in model_state_dict_list:
            for name in new_par:
                new_par[name] += model_dict[name] * pi

        self.model.load_state_dict(copy.deepcopy(new_par))

    def aggrerate_idx(self, model_state_dict_list, optim_state_dict_list):
        """Inverse-distance aggregation (IDA).

        Each client tensor is weighted by the inverse L1 distance of that
        tensor from the plain average, normalized per tensor by Z so the
        weights sum to 1.  `optim_state_dict_list` is unused (shared signature).
        """
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0 / len(model_state_dict_list))
        for name in new_par:
            # BUGFIX: accumulators must live on the same device as the client
            # tensors; the original allocated CPU zeros and would fail on cuda.
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)

        # Pass 1: plain average — the reference point for the distances.
        for model_dict in model_state_dict_list:
            for name in new_par:
                new_par[name] += model_dict[name] * pi

        w_Avg = copy.deepcopy(new_par)
        # Per-tensor normalizer: Z[name] = sum_k 1 / ||w_avg - w_k||_1.
        Z = {}
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape).to(self.device)
            Z[name] = torch.zeros([]).to(self.device)

        # Pass 2: accumulate the normalizer.
        # NOTE(review): a client exactly equal to the average yields a zero
        # distance and hence an inf weight — same as the original behavior.
        for model_dict in model_state_dict_list:
            for name in new_par:
                dist = torch.norm((w_Avg[name] - model_dict[name]).flatten(), p=1)
                Z[name] += 1 / dist

        # Pass 3: weighted sum, weight_k = (1/dist_k) / Z.
        for par in model_state_dict_list:
            for name in new_par:
                new_par[name] += (1 / torch.norm((w_Avg[name] - par[name]).flatten(), p=1)) * (1 / Z[name]) * par[name]

        self.model.load_state_dict(copy.deepcopy(new_par))

    def aggrerate_prox(self, model_state_dict_list, optim_state_dict_list):
        """FedProx server-side aggregation.

        In FedProx the proximal term modifies the *client* objective; the
        server-side aggregation is plain parameter averaging, so delegate to
        FedAvg.  (The previous body was unreachable dead code behind
        `if False:` that referenced undefined names, making strategy 2 a
        silent no-op.)
        """
        self.aggrerate_avg(model_state_dict_list, optim_state_dict_list)

    def aggrerate_optimizer(self, model_state_dict_list, optim_state_dict_list):
        """Uniformly average Adam optimizer state across clients.

        Averages 'step', 'exp_avg' (first moment) and 'exp_avg_sq' (second
        moment) per parameter.  Only meaningful for optimizers that keep this
        per-parameter state; SGD state dicts have no such entries.
        """
        new_opt_par = copy.deepcopy(optim_state_dict_list[0])
        # new_opt_par["state"] maps parameter index -> {'step','exp_avg','exp_avg_sq'};
        # new_opt_par["param_groups"][0]['params'] is the list of parameter indices.

        # Zero the accumulators.
        for index in new_opt_par["param_groups"][0]['params']:
            new_opt_par["state"][index]['step'] = 0
            new_opt_par["state"][index]['exp_avg'] = torch.zeros(new_opt_par["state"][index]['exp_avg'].shape)
            new_opt_par["state"][index]['exp_avg_sq'] = torch.zeros(new_opt_par["state"][index]['exp_avg_sq'].shape)

        # Uniform weight; hoisted out of the loop (loop-invariant).
        pi = torch.tensor(1.0 / len(model_state_dict_list))
        for par in optim_state_dict_list:
            for i in range(len(new_opt_par["param_groups"][0]['params'])):
                index = new_opt_par["param_groups"][0]['params'][i]   # index of layer i in the aggregate
                index2 = par["param_groups"][0]['params'][i]          # index of layer i in this client
                new_opt_par["state"][index]['step'] += par["state"][index2]['step'] * pi
                new_opt_par["state"][index]['exp_avg'] += par["state"][index2]['exp_avg'] * pi
                new_opt_par["state"][index]['exp_avg_sq'] += par["state"][index2]['exp_avg_sq'] * pi

        self.optimizer.load_state_dict(copy.deepcopy(new_opt_par))

    def start_federated(self):
        """Run the full federated training loop for `self.rounds` rounds."""
        for round_idx in range(self.rounds):
            results_model = []
            results_optim = []

            # Broadcast: every training client receives a fresh copy of the
            # global model and optimizer state.
            for worker in self.train_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()), copy.deepcopy(self.optimizer.state_dict()))
            # Local training on each client.
            for worker in self.train_worker_list:
                worker.train()
                # Optional per-client local evaluation after training:
                # worker.test()

            # Collect the trained parameters from every client.
            for worker in self.train_worker_list:
                results_model.append(worker.get_model_state_dict())
                results_optim.append(worker.get_optim_state_dict())

            # Aggregate according to the configured strategy.
            if self.aggrerate_strategy == 0:
                self.aggrerate_avg(results_model, results_optim)
            elif self.aggrerate_strategy == 1:
                self.aggrerate_idx(results_model, results_optim)
            elif self.aggrerate_strategy == 2:
                self.aggrerate_prox(results_model, results_optim)

            if self.is_aggregate_optimizer:
                self.aggrerate_optimizer(results_model, results_optim)

            # Validate: average the test metric over all test clients.
            result_test = 0
            for worker in self.test_worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()), copy.deepcopy(self.optimizer.state_dict()))
                result_test += worker.test() / len(self.test_worker_list)
            print(round_idx, result_test)

 
if __name__ == "__main__":
    from data_load import DataManager

    dm = DataManager()
    dm.load_data()
    # BUGFIX: the original called FLServer() without its two required dataset
    # lists (TypeError) and passed the allocation result to start_federated(),
    # which takes no arguments.  allocate_data_avg() is assumed to return
    # (train_dataset_list, test_dataset_list) — TODO confirm against data_load.
    train_datasets, test_datasets = dm.allocate_data_avg()
    server = FLServer(train_datasets, test_datasets)
    server.start_federated()
