# Server used to control the federated training process.
from numpy.core.defchararray import join
from FLClient import FLClient
import pandas as pd 
import numpy as np 
from sklearn.utils import shuffle
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import torch 
from torch import nn, optim
from dnn_model import Net
from imblearn.over_sampling import RandomOverSampler ,SMOTE, ADASYN,BorderlineSMOTE, KMeansSMOTE, SVMSMOTE

import copy

class FLServer():
    """Federated-learning server.

    Owns the global model and optimizer, partitions the dataset among the
    simulated clients, and each round aggregates the clients' trained
    parameters according to the configured aggregation strategy.
    """

    # Configure the parameters used during learning.
    def __init__(self):
        # Global model and its optimizer, both held on the server side.
        self.model = Net()
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
        # The optimizer below adds an L2 regularization (weight-decay) term.
        # self.optimizer = optim.Adam(self.model.parameters(),lr=0.001,weight_decay=0.000001)

        # Strategy switches selecting the concrete strategy; the detailed
        # parameters of each strategy are controlled inside its method.
        # Data strategy: 0 = random balanced data, 1 = non-IID (unbalanced) data.
        self.data_strategy = 1
        # Aggregation strategy: 0 = FedAvg, 1 = IDA (inverse-distance aggregation).
        self.aggrerate_strategy = 0

    # 1. Load the dataset and randomly split it into training data and test
    #    data.  Used in experiments to distribute data among clients.
    def load_data(self,
                  csv_path='../../data/maldroid2020/feature_vectors_syscallsbinders_frequency_5_Cat.csv',
                  feature_count=470,
                  train_ratio=0.8):
        """Load, class-balance, normalize and split the dataset.

        Args:
            csv_path: CSV file with ``feature_count`` feature columns followed
                by a 1-based class-label column.
            feature_count: number of feature columns at the start of each row.
            train_ratio: expected fraction of rows in the train split (the
                split is drawn per-row, so actual sizes vary slightly).

        Returns:
            (data_train, data_test) — ndarrays whose last column holds the
            0-based class label.
        """
        data_pd = pd.read_csv(csv_path)
        data_np = data_pd.values

        # Randomly shuffle the rows of the dataset.
        data_np = shuffle(data_np)

        # Separate features and labels; labels in the file are 1-based.
        x, y = data_np[:, 0:feature_count], data_np[:, feature_count] - 1

        # Over-sample minority classes (random repetition) to balance the
        # dataset.  The SMOTE-family alternatives are kept for experimentation.
        ros = RandomOverSampler(random_state=0)
        # ros = SMOTE(random_state=0)
        # ros = ADASYN(random_state=0)
        # ros = BorderlineSMOTE(random_state=0, kind="borderline-1")
        # ros = BorderlineSMOTE(random_state=0, kind="borderline-2")
        # ros = KMeansSMOTE(random_state=0)
        # ros = SVMSMOTE(random_state=0)
        x, y = ros.fit_resample(x, y)

        # Min-max normalize every feature to [0, 1].
        minmax_scaler = preprocessing.MinMaxScaler()
        x = minmax_scaler.fit_transform(x)

        # Re-attach the labels as the last column of the normalized data.
        y = y.reshape(y.shape[0], 1)
        data_np = np.hstack([x, y])

        # Per-row random train/test split.
        mask = np.random.choice([True, False],
                                p=[train_ratio, 1 - train_ratio],
                                size=[data_np.shape[0], ])
        data_train, data_test = data_np[mask], data_np[~mask]

        return data_train, data_test

    # Give each of the join_size clients a same-sized random subset of the
    # training data (subsets may overlap between clients).
    def allocate_data_avg(self, data_train, join_size, data_counts=2000):
        """Return ``join_size`` arrays of ``data_counts`` rows each, sampled
        without replacement (per client) from ``data_train``."""
        data_join = []
        for _ in range(join_size):
            idx = np.random.choice(range(data_train.shape[0]), data_counts,
                                   replace=False)
            data_join.append(data_train[idx])

        return data_join

    def allocate_data_noniid(self, data_train, join_size, data_counts=2000,
                             label_total=5, label_size=3):
        """Non-IID allocation: each client receives ``data_counts`` rows drawn
        only from ``label_size`` randomly chosen classes.

        Args:
            data_train: array whose last column holds 0-based integer labels.
            join_size: number of clients to allocate data for.
            data_counts: number of rows handed to each client.
            label_total: total number of classes in the dataset.
            label_size: number of classes each client may see.
        """
        data_join = []

        # Pick label_size random class labels for every client.
        label_list = [np.random.choice(range(label_total), label_size,
                                       replace=False)
                      for _ in range(join_size)]

        # Group the rows by class label.  Kept as a plain Python list: the
        # original np.array(...) over ragged per-class arrays raises
        # ValueError on NumPy >= 1.24.
        data_class = [data_train[data_train[:, -1] == i]
                      for i in range(label_total)]

        # For each client, stack its allowed classes and sample rows from them.
        for labels in label_list:
            data_temp = np.vstack([data_class[j] for j in labels])
            size = data_temp.shape[0]
            idx = np.random.choice(range(size), data_counts, replace=False)
            data_join.append(data_temp[idx])

        return data_join

    # FedAvg: the new global parameters are the uniform mean of the clients'.
    def aggrerate_avg(self, model_state_dict_list, optim_state_dict_list):
        """Replace the global model with the element-wise average of the
        client state dicts.

        ``optim_state_dict_list`` is unused here; optimizer state is merged
        separately by ``aggrerate_optimizer``.
        """
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0 / len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape)

        for model_dict in model_state_dict_list:
            for name in new_par:
                new_par[name] += model_dict[name] * pi

        self.model.load_state_dict(copy.deepcopy(new_par))

    # Inverse-distance aggregation (IDA).
    def aggrerate_idx(self, model_state_dict_list, optim_state_dict_list):
        """Weight each client's parameters by the inverse L1 distance of the
        client from the plain FedAvg mean, normalized per tensor.

        NOTE(review): if a client's tensor exactly equals the mean, the
        1/distance factor divides by zero — assumed not to occur in practice.
        """
        new_par = copy.deepcopy(self.model.state_dict())
        pi = torch.tensor(1.0 / len(model_state_dict_list))
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape)

        # First pass: plain FedAvg mean, used as the reference point.
        for model_dict in model_state_dict_list:
            for name in new_par:
                new_par[name] += model_dict[name] * pi

        # Three working copies: the mean, a scratch buffer for distances, and
        # the per-tensor normalization constant Z.
        w_Avg = copy.deepcopy(new_par)
        w_Avg_w = copy.deepcopy(new_par)
        Z = copy.deepcopy(new_par)

        # Reset the accumulators to zero.
        for name in new_par:
            new_par[name] = torch.zeros(new_par[name].shape)
            w_Avg_w[name] = torch.zeros(new_par[name].shape)
            Z[name] = torch.zeros([])

        # Z[name] = sum over clients of 1 / ||w_avg - w_client||_1.
        # (The original also called .flatten() and discarded the result — a
        # no-op, removed; torch.norm reduces over all elements regardless.)
        for model_dict in model_state_dict_list:
            for name in new_par:
                w_Avg_w[name] = w_Avg[name] - model_dict[name]
                w_Avg_w[name] = torch.norm(w_Avg_w[name], p=1)
                Z[name] += 1 / w_Avg_w[name]

        # Weighted combination with per-client weight (1/distance) / Z.
        for par in model_state_dict_list:
            for name in new_par:
                new_par[name] += (1 / torch.norm((w_Avg[name] - par[name]).flatten(), p=1)) * (1 / Z[name]) * par[name]

        self.model.load_state_dict(copy.deepcopy(new_par))

    # FedProx-style aggregation (not implemented).
    def aggrerate_prox(self, model_state_dict_list, optim_state_dict_list):
        """Placeholder for a FedProx-style aggregation.

        The original body was disabled behind ``if False:`` and referenced
        undefined names (``new_par``, ``model_par``, ``self.device``), so it
        could never have run.  Kept as an explicit no-op so that
        ``aggrerate_strategy == 2`` preserves its previous (do-nothing)
        behavior until a real implementation is added.
        """
        # TODO(review): implement FedProx aggregation; currently a no-op.
        pass

    # Merge the clients' Adam optimizer state into the server optimizer.
    def aggrerate_optimizer(self, model_state_dict_list, optim_state_dict_list):
        """Average the per-parameter Adam state (step, exp_avg, exp_avg_sq)
        over all clients and load the result into the server optimizer.

        NOTE(review): the averaged 'step' becomes a float tensor; newer torch
        versions represent 'step' differently — verify against the installed
        torch before relying on this.
        """
        new_opt_par = copy.deepcopy(optim_state_dict_list[0])
        # new_opt_par["state"] maps a parameter index to that layer's Adam
        # state (step, m_t, v_t); param_groups[0]['params'] is the list of
        # per-layer parameter indices.

        # Zero out the accumulated state of every parameter.
        for index in new_opt_par["param_groups"][0]['params']:
            new_opt_par["state"][index]['step'] = 0
            new_opt_par["state"][index]['exp_avg'] = torch.zeros(new_opt_par["state"][index]['exp_avg'].shape)
            new_opt_par["state"][index]['exp_avg_sq'] = torch.zeros(new_opt_par["state"][index]['exp_avg_sq'].shape)

        # Uniform average over clients.  pi is loop-invariant (hoisted) and
        # scales by the length of the list actually being averaged; index and
        # index2 pair up the i-th layer in the server copy and in the client
        # copy, since the integer indices may differ between state dicts.
        pi = torch.tensor(1.0 / len(optim_state_dict_list))
        for par in optim_state_dict_list:
            for i in range(len(new_opt_par["param_groups"][0]['params'])):
                index = new_opt_par["param_groups"][0]['params'][i]
                index2 = par["param_groups"][0]['params'][i]
                new_opt_par["state"][index]['step'] += par["state"][index2]['step'] * pi
                new_opt_par["state"][index]['exp_avg'] += par["state"][index2]['exp_avg'] * pi
                new_opt_par["state"][index]['exp_avg_sq'] += par["state"][index2]['exp_avg_sq'] * pi

        self.optimizer.load_state_dict(copy.deepcopy(new_opt_par))

    # Drive the whole federated training process.
    def start_federated(self, rounds=200, join_size=20):
        """Run ``rounds`` rounds of federated training with ``join_size``
        clients: distribute the global model, let every client train locally,
        aggregate the results, and evaluate on a held-out test client.
        """
        # Load and split the data.
        data_train, data_test = self.load_data()

        # Data-allocation strategy selection.
        if self.data_strategy == 0:
            data_join = self.allocate_data_avg(data_train, join_size)
        elif self.data_strategy == 1:
            data_join = self.allocate_data_noniid(data_train, join_size)

        # One client per training partition, plus one client for testing.
        worker_list = [FLClient(data_join[i]) for i in range(join_size)]
        worker_test = FLClient(data_test)

        for round_idx in range(rounds):
            results_model = []
            results_optim = []

            # Send every client a fresh copy of the global model/optimizer.
            for worker in worker_list:
                worker.load_model(copy.deepcopy(self.model.state_dict()),
                                  copy.deepcopy(self.optimizer.state_dict()))
            # Local training on every client.
            for worker in worker_list:
                worker.train()

            # Collect the trained parameters.
            for worker in worker_list:
                results_model.append(worker.get_model_state_dict())
                results_optim.append(worker.get_optim_state_dict())

            # Aggregate according to the configured strategy.
            if self.aggrerate_strategy == 0:
                self.aggrerate_avg(results_model, results_optim)
            elif self.aggrerate_strategy == 1:
                self.aggrerate_idx(results_model, results_optim)
            elif self.aggrerate_strategy == 2:
                self.aggrerate_prox(results_model, results_optim)
            self.aggrerate_optimizer(results_model, results_optim)

            # Evaluate the aggregated global model.
            worker_test.load_model(copy.deepcopy(self.model.state_dict()),
                                   copy.deepcopy(self.optimizer.state_dict()))
            result_test = worker_test.test()
            print(round_idx, result_test)

 
if __name__=="__main__":

    server = FLServer()
    server.start_federated()
