import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset_processing import average_weights
import random
from options import args_parser
from pca2 import pca
from local_update import text_save
import gc
# Hyper-parameter configuration; which set applies is decided by the
# federation size given on the command line (args.clients_num).
args = args_parser()

# Settings shared by both supported federation sizes (previously duplicated
# verbatim in each branch).
BATCH_SIZE = 2
LR = 0.1                     # learning rate for both Adam optimizers
EPSILON = 0.9                # epsilon-greedy: probability of taking the greedy action
GAMMA = 0.9                  # reward discount factor
TARGET_REPLACE_ITER = 1      # target-network refresh period (in learn steps)
epoch_MEMORY_CAPACITY = 5    # replay-buffer size for the local-epoch agent
epoch_N_ACTIONS = 5          # number of selectable local-epoch counts
epoch_Opt_actions = 1        # how many epoch actions to pick (the single best)

if args.clients_num == 10:
    client_MEMORY_CAPACITY = 5   # replay-buffer size for the client agent
    client_N_ACTIONS = 10        # number of selectable clients (actions)
    N_STATES = 111               # PCA-reduced weights (11 models x 10 dims) + local-epoch count
    client_Opt_actions = 5       # how many clients to pick per round
    n_components = 10            # PCA output dimensionality
elif args.clients_num == 100:
    client_MEMORY_CAPACITY = 1
    client_N_ACTIONS = 100
    N_STATES = 10101             # PCA-reduced weights (101 models x 100 dims) + local-epoch count
    client_Opt_actions = 10
    n_components = 100
else:
    # Previously an unsupported size fell through silently and every constant
    # above stayed undefined, failing later with an opaque NameError.
    raise ValueError(
        f"Unsupported clients_num={args.clients_num}; expected 10 or 100"
    )


def trans_torch(x):
    """Convert an array-like into a float32 torch tensor."""
    return torch.FloatTensor(x)

# 将权重展平为向量的形式
def flatten_weights(weights):
    """Concatenate each (name, array) pair's values into one 1-D numpy vector.

    `weights` is an iterable of (key, tensor/array) pairs; every value is
    flattened and the scalars are joined in order.
    """
    flat = [v for _, w in weights for v in w.flatten().tolist()]
    return np.array(flat)

class DQN(object):
    """Paired DQN agents for federated learning orchestration.

    One eval/target network pair scores client-selection actions, a second
    pair scores local-epoch actions.  A state is the PCA-compressed stack of
    all model weights (local models plus the FedAvg aggregate) with the
    local-epoch count appended; the reward (Eq. 19 in the paper this follows)
    trades off training-loss progress against remaining resources.
    """

    def __init__(self):
        # Build eval/target nets and the replay memories.
        self.target_accuracy = 0.9
        self.R = 0 # cumulative discounted return; starts at 0
        # Four networks: eval/target pair for client selection and eval/target
        # pair for local-epoch selection.
        self.client_eval_net, self.client_target_net, self.epoch_eval_net, self.epoch_target_net \
            = client_Net(), client_Net(), epoch_Net(), epoch_Net()
        self.client_learn_step_counter, self.epoch_learn_step_counter = 0, 0     # learn-step counters driving target-net refreshes
        self.client_memory_counter, self.epoch_memory_counter = 0, 0         # number of transitions stored so far
        self.client_memory = np.zeros((client_MEMORY_CAPACITY, N_STATES * 2 + 2))     # replay rows hold (s, a, r, s_)
        self.epoch_memory = np.zeros((epoch_MEMORY_CAPACITY, N_STATES * 2 + 2))  # replay buffer for the epoch agent
        self.client_optimizer = torch.optim.Adam(self.client_eval_net.parameters(), lr=LR)    # Adam on the client eval net
        self.epoch_optimizer = torch.optim.Adam(self.epoch_eval_net.parameters(), lr=LR)  # Adam on the epoch eval net
        self.client_loss_func = nn.MSELoss()   # TD-error criterion
        self.epoch_loss_func = nn.MSELoss()  # TD-error criterion
        self.DQNiteration = 10
        self.epoch_loss, self.client_loss = [], []

    # Epsilon-greedy client selection: with probability EPSILON rank all
    # clients by the eval net's Q-values and keep the top client_Opt_actions;
    # otherwise draw client_Opt_actions distinct indices uniformly at random.
    def choose_client_action(self, x): # x is the current (PCA-reduced) state vector
        self.current_state = x
        # Add a leading batch dimension so a single state becomes a batch of 1.
        # NOTE(review): the cast to float64 feeds networks whose weights are
        # float32 — confirm this does not raise a dtype mismatch at runtime.
        x = torch.unsqueeze(torch.FloatTensor(x), 0).to(torch.float64)
        if np.random.uniform() < EPSILON:
            # Exploit: score every client action for this state.
            actions_value = self.client_eval_net.forward(x)
            # Sort Q-values descending (along the last dim); keep the indices.
            _, indices = actions_value.sort(descending = True)
            # Take the client_Opt_actions best-ranked actions of the only sample.
            actions = indices[0][:client_Opt_actions].tolist()
            print(actions_value)
            print("DDQN选择:", actions) # greedy (network-chosen) actions
        else:   # explore: random actions
            actions = random.sample(range(client_N_ACTIONS), client_Opt_actions)
            print("随机选择:", actions)
        # List of chosen client indices.
        return actions

    # Epsilon-greedy choice of the local-epoch action; mirrors
    # choose_client_action but uses the epoch networks and action space.
    def choose_local_epoch_action(self, x):
        self.current_state = x
        x = torch.unsqueeze(torch.FloatTensor(x), 0).to(torch.float64)
        if np.random.uniform() < EPSILON:   # exploit: best local-epoch count
            actions_value = self.epoch_eval_net.forward(x)
            _, indices = actions_value.sort(descending = True)
            actions = indices[0][:epoch_Opt_actions].tolist()       # top epoch_Opt_actions actions (a single one here)
            print(actions_value)
            print("DDQN选择:", actions)
        else:   # explore: random action
            actions = random.sample(range(epoch_N_ACTIONS), epoch_Opt_actions)
            print("随机选择:", actions)
        return actions

    # First selection round: build the initial state s_t from every client's
    # parameters plus the FedAvg aggregate and the local-epoch count, then pick
    # the first actions and compute the first reward.
    def first_client_epoch_selection_round(self, weights, loss, local_epoch, first_remain_resouces):
        """First round of client / local-epoch selection.

        Args:
            weights: list of per-client model state dicts.
            loss: list of per-client training losses.
            local_epoch: local-epoch count used in this round's training.
            first_remain_resouces: resources remaining after the first round.

        Returns:
            (selected client indices, global weights, selected local-epoch
            action(s), cumulative return R, this round's reward).
        """
        self.i = 1
        self.first_remain_resouces = first_remain_resouces
        self.first_weights, self.selected_weights, self.selected_loss, self.train_loss = [], [], [], []
        self.first_local_losses = loss
        self.first_local_weights = weights
        self.total_client_index = []    # overall client index list
        # FedAvg over every participating device's local weights.
        first_global_weights = average_weights(self.first_local_weights)
        # Alias (not a copy): each entry is one device's state dict mapping
        # parameter name -> tensor.
        first_local_global_weights = self.first_local_weights
        # Append the aggregate so the state covers local models + global model.
        # NOTE(review): via the alias above this also mutates the caller's
        # `weights` list in place — confirm that is intended.
        first_local_global_weights.append(first_global_weights)
        # Convert each state dict into a list of (name, tensor) pairs.
        for idx in range(len(first_local_global_weights)):
            key_list = list(first_local_global_weights[idx].keys())
            value_list = list(first_local_global_weights[idx].values())
            key_value_to_list = list(zip(key_list, value_list))  # (key, value) pairs
            self.first_weights.append(key_value_to_list)
        # Flatten each model into one long vector.
        weight_vecs = [flatten_weights(weight) for weight in self.first_weights]
        # PCA-reduce the weight vectors to n_components dimensions.
        self.first_weights = pca(weight_vecs, n_components)
        # Flatten the PCA output into a single 1-D state list.
        self.first_weights = self.first_weights.reshape(-1).tolist()
        # The local-epoch count is part of the state too.
        self.first_weights.append(local_epoch)
        # To tensor.
        self.first_weights = trans_torch(self.first_weights)

        # Evaluate every action for this state and keep the best-valued ones.
        self.sample_client_index = self.choose_client_action(self.first_weights)
        # Likewise pick the best-valued local-epoch count.
        self.sample_local_epoch = self.choose_local_epoch_action(self.first_weights)

        # Remember this state for the next round's transition.
        self.current_state = self.first_weights
        # Collect the selected devices' weights and losses.
        for idx in range(len(self.sample_client_index)):
            self.selected_weights.append(self.first_local_weights[self.sample_client_index[idx]])
            self.selected_loss.append(self.first_local_losses[self.sample_client_index[idx]])
        # Global model: FedAvg over ALL devices (not only the selected ones).
        self.global_weights = first_global_weights
        # Global loss: average over the selected devices only.
        self.gloabl_loss_avg = sum(self.selected_loss) / len(self.selected_loss)
        print(f'Training Loss : {np.mean(np.array(self.gloabl_loss_avg))}')
        self.train_loss.append(self.gloabl_loss_avg)

        # Reward (Eq. 19): mixes accuracy progress and remaining resources,
        # weighted by how much of the resource budget has been spent.
        lamda_T = 0.6 + 0.4 * (args.total_resources - self.first_remain_resouces) / args.total_resources
        Omuga_T = lamda_T * (1-self.gloabl_loss_avg) / self.target_accuracy + (1 - lamda_T) * self.first_remain_resouces / args.total_resources - 1
        self.reward = pow(64, Omuga_T)

        print("当前奖励值:", self.reward)
        # Discounted accumulation of the per-round reward.
        self.R = self.R + pow(GAMMA,(self.i-1))*self.reward
        print("累计奖励值:", self.R)
        # Selected client indices, global weights, local-epoch choice,
        # cumulative return, and this round's reward.
        return self.sample_client_index, self.global_weights, self.sample_local_epoch, self.R, self.reward

    # Subsequent rounds: same inputs as first_client_epoch_selection_round, but
    # additionally stores transitions, trains the DQNs once the replay buffers
    # have wrapped, and returns the updated local weights and average loss.
    def client_epoch_selection_round(self, weights, loss, local_epoch, remain_resouces):
        self.remain_resouces = remain_resouces
        self.next_weights = []
        self.i = self.i + 1
        self.local_weights = self.first_local_weights
        self.local_losses = self.first_local_losses
        # Splice this round's results from the selected clients back into the
        # backed-up weight/loss lists.
        for idx in range(len(weights)):
            Idx = self.sample_client_index[idx]
            self.local_weights[Idx] = weights[idx]
            self.local_losses[Idx] = loss[idx]

        # The last slot holds the aggregated global model; rebuild it from the
        # preceding per-client entries.
        # NOTE(review): the slice 0:len-2 excludes the last TWO entries (the old
        # aggregate AND the final client); 0:len-1 would average all clients as
        # the original comment described — confirm which is intended.
        self.local_weights[len(self.local_weights)-1] = average_weights(self.local_weights[0:len(self.local_weights)-2])

        # The transition observed this round.
        s = self.current_state # previous state
        a = self.sample_client_index # client actions taken
        r = self.reward # reward received
        # Build the next state exactly as in the first round: (name, tensor)
        # pairs -> flat vectors -> PCA -> 1-D list -> + local_epoch -> tensor.
        for idx in range(len(self.local_weights)):
            key_list = list(self.local_weights[idx].keys())
            value_list = list(self.local_weights[idx].values())
            key_value_to_list = list(zip(key_list, value_list))  # (key, value) pairs
            self.next_weights.append(key_value_to_list)

        # Flatten each model into one long vector.
        weight_vecs = [flatten_weights(weight) for weight in self.next_weights]
        # PCA down to the configured dimensionality.
        s_ = pca(weight_vecs, n_components)

        # The new state = PCA-reduced parameters + last local-epoch count.
        s_ = s_.reshape(-1).tolist()
        s_.append(local_epoch)
        # To tensor.
        s_ = trans_torch(s_)



        # Store one client transition per selected client; learn once the
        # buffer has wrapped.
        for i in self.sample_client_index:
            self.client_store_transition(s, i, r, s_)
        if self.client_memory_counter > client_MEMORY_CAPACITY:
            # for i in range(self.DQNiteration):
               self.client_learn() # buffer full: train the client DQN

        # Same for the epoch agent.
        for i in self.sample_local_epoch:
            self.epoch_store_transition(s, i, r, s_)
        if self.epoch_memory_counter > epoch_MEMORY_CAPACITY:
            # for i in range(self.DQNiteration):
               self.epoch_learn() # buffer full: train the epoch DQN

        # Pick next round's clients from the new state...
        self.sample_client_index = self.choose_client_action(s_)
        # ...and next round's local-epoch count.
        self.sample_local_epoch = self.choose_local_epoch_action(s_)

        self.selected_weights, self.selected_loss = [],[]
        # Gather the newly selected devices' weights for the next iteration.
        for idx in range(len(self.sample_client_index)):
            self.selected_weights.append(self.local_weights[self.sample_client_index[idx]])
        # update global weights: FedAvg over all entries (incl. the aggregate slot)
        self.global_weights = average_weights(self.local_weights)
        # Average training loss of the clients that reported this round.
        self.gloabl_loss_avg = sum(loss) / len(loss)
        print(f'Training Loss : {np.mean(np.array(self.gloabl_loss_avg))}')
        self.train_loss.append(self.gloabl_loss_avg)
        # Reward (Eq. 19), as in the first round.
        lamda_T = 0.6 + 0.4 * (args.total_resources - self.remain_resouces) / args.total_resources
        Omuga_T = lamda_T * (1 - self.gloabl_loss_avg) / self.target_accuracy + (1 - lamda_T) * self.remain_resouces / args.total_resources - 1
        self.reward = pow(64, Omuga_T)
        print("当前奖励值:", self.reward)
        self.R = self.R + pow(GAMMA, (self.i - 1)) * self.reward
        print("累计奖励值:", self.R)
        # The new state becomes the current state for the next round.
        self.current_state = s_
        self.first_local_weights = self.local_weights
        self.first_local_losses = self.local_losses
        # Unlike the first round this also returns the updated local weights
        # and the average loss.
        return self.sample_client_index, self.global_weights, self.local_weights, self.gloabl_loss_avg, self.sample_local_epoch, self.R, self.reward

    def epoch_store_transition(self, s, a, r, s_):
        """Store one (s, a, r, s_) transition in the epoch replay buffer."""
        transition = np.hstack((s, [a, r], s_))
        # Overwrite the oldest row once the buffer is full (ring buffer).
        index = self.epoch_memory_counter % epoch_MEMORY_CAPACITY
        self.epoch_memory[index, :] = transition
        self.epoch_memory_counter += 1

    # (original note, in Chinese below: "training procedure of the client DDQN network")
    '''client_DDQN网络的过程'''
    def epoch_learn(self):
        """One training step of the epoch DQN from a sampled mini-batch."""
        print("dqn网络更新")
        # Refresh the target net from the eval net every TARGET_REPLACE_ITER steps.
        if self.epoch_learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.epoch_target_net.load_state_dict(self.epoch_eval_net.state_dict())
        self.epoch_learn_step_counter += 1

        # Sample a mini-batch of stored transitions and slice it into columns.
        sample_index = np.random.choice(epoch_MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.epoch_memory[sample_index, :]
        b_s = torch.Tensor(b_memory[:, :N_STATES])
        b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
        b_r = torch.Tensor(b_memory[:, N_STATES+1:N_STATES+2])
        b_s_ = torch.Tensor(b_memory[:, -N_STATES:])

        # Q(s, a) of the actions actually taken (eval net scores all actions).
        q_eval = self.epoch_eval_net(b_s).gather(1, b_a)  # shape (batch, 1)
        q_next = self.epoch_target_net(b_s_).detach()     # target-net values; detached so no gradients flow back
        # NOTE(review): b_r is (batch, 1) while q_next.max(1)[0] is (batch,), so
        # this broadcasts to (batch, batch); the conventional DQN target reshapes
        # the max to (batch, 1) — confirm intended.
        q_target = b_r + GAMMA * q_next.max(1)[0]   # shape (batch, 1)
        loss = self.epoch_loss_func(q_eval, q_target)
        # Record the epoch-DQN loss history.
        # NOTE(review): torch.Tensor(loss) on a 0-dim loss tensor is unusual;
        # loss.item() is the conventional scalar extraction — confirm this runs.
        self.epoch_loss.append(torch.Tensor(loss).tolist())
        # Truncate the log file, then rewrite the full loss history.
        file = open("epochDQN_loss.txt", 'w').close()
        text_save('epochDQN_loss.txt', self.epoch_loss)
        # Backprop and update the eval net.
        self.epoch_optimizer.zero_grad()
        loss.backward()
        self.epoch_optimizer.step()


    def client_store_transition(self, s, a, r, s_):
        """Store one (s, a, r, s_) transition in the client replay buffer."""
        transition = np.hstack((s, [a, r], s_))
        # Overwrite the oldest row once the buffer is full (ring buffer).
        index = self.client_memory_counter % client_MEMORY_CAPACITY
        self.client_memory[index, :] = transition
        self.client_memory_counter += 1

    # (original note, in Chinese below) Client DDQN training procedure:
    # 1. two networks, client_eval_net (evaluation) and client_target_net (target);
    # 2. every TARGET_REPLACE_ITER steps the eval net's parameters are copied to
    #    the target net to keep the target stable;
    # 3. next-state action values come from the target net (q_next), not the
    #    eval net, which reduces over-estimation and improves stability;
    # 4. the MSE between evaluated and target action values is backpropagated
    #    to update the eval net.
    '''client_DDQN网络的过程：
1.使用两个神经网络：self.client_eval_net 和 self.client_target_net 分别代表评估网络和目标网络。
2.目标网络参数更新：根据一定的步数（TARGET_REPLACE_ITER），将评估网络的参数复制给目标网络，以保持目标网络的稳定。
3.计算下一个状态的动作价值时，使用目标网络产生的动作价值（q_next），而不是直接使用评估网络产生的动作价值。这样可以减少估计过高的问题，提高算法的稳定性。
4.损失函数的计算和优化过程：根据评估网络产生的动作价值和目标动作价值，计算损失并执行反向传播优化过程，从而更新评估网络的参数。'''
    def client_learn(self):
        """One training step of the client DQN from a sampled mini-batch."""
        print("dqn网络更新")
        # Refresh the target net from the eval net every TARGET_REPLACE_ITER steps.
        if self.client_learn_step_counter % TARGET_REPLACE_ITER == 0:
            self.client_target_net.load_state_dict(self.client_eval_net.state_dict())
        self.client_learn_step_counter += 1

        # Sample a mini-batch of stored transitions and slice it into columns.
        sample_index = np.random.choice(client_MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.client_memory[sample_index, :]
        b_s = torch.Tensor(b_memory[:, :N_STATES])
        b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))
        b_r = torch.Tensor(b_memory[:, N_STATES+1:N_STATES+2])
        b_s_ = torch.Tensor(b_memory[:, -N_STATES:])

        # Q(s, a) of the actions actually taken (eval net scores all actions).
        q_eval = self.client_eval_net(b_s).gather(1, b_a)  # shape (batch, 1)
        q_next = self.client_target_net(b_s_).detach()     # target-net values; detached so no gradients flow back
        # NOTE(review): same (batch,) vs (batch, 1) broadcast concern as in
        # epoch_learn — confirm intended.
        q_target = b_r + GAMMA * q_next.max(1)[0]   # shape (batch, 1)
        loss = self.client_loss_func(q_eval, q_target)
        # Record the client-DQN loss history.
        # NOTE(review): torch.Tensor(loss) — see the matching note in epoch_learn.
        self.client_loss.append(torch.Tensor(loss).tolist())
        # Truncate the log file, then rewrite the full loss history.
        file = open("clientDQN_loss.txt", 'w').close()
        text_save('clientDQN_loss.txt', self.client_loss)
        # Backprop and update the eval net.
        self.client_optimizer.zero_grad()
        loss.backward()
        self.client_optimizer.step()

class client_Net(nn.Module):
    """Q-network for client selection.

    Maps an N_STATES-dimensional state vector through one hidden layer of
    10 ReLU units to client_N_ACTIONS outputs — one Q-value per selectable
    client.
    """

    def __init__(self):
        super().__init__()
        # Hidden layer (state -> 10 units); weights re-drawn from N(0, 0.1).
        self.fc1 = nn.Linear(N_STATES, 10)
        self.fc1.weight.data.normal_(0, 0.1)
        # Output head (10 units -> one Q-value per client action).
        self.out = nn.Linear(10, client_N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Return the Q-value of every client action for the state batch x."""
        return self.out(F.relu(self.fc1(x)))


class epoch_Net(nn.Module):
    """Q-network for local-epoch selection.

    Same architecture as client_Net but with epoch_N_ACTIONS outputs — one
    Q-value per selectable local-epoch count.
    """

    def __init__(self):
        super().__init__()
        # Hidden layer (state -> 10 units); weights re-drawn from N(0, 0.1).
        self.fc1 = nn.Linear(N_STATES, 10)
        self.fc1.weight.data.normal_(0, 0.1)
        # Output head (10 units -> one Q-value per epoch action).
        self.out = nn.Linear(10, epoch_N_ACTIONS)
        self.out.weight.data.normal_(0, 0.1)

    def forward(self, x):
        """Return the Q-value of every epoch action for the state batch x."""
        return self.out(F.relu(self.fc1(x)))

# 此类并未调用
class suiji_client(object):
    """Baseline selector that picks clients uniformly at random.

    (Not referenced anywhere in this module; kept as a comparison baseline.)
    """

    def __init__(self):
        # Backed-up per-client losses and weights from the previous round.
        self.first_local_losses, self.first_local_weights = [], []

    def first_client_ramdomly_selection_round(self, global_weights, weights, loss, client_Num, num):
        """First round: draw `num` random clients and broadcast the global model."""
        self.sample_client_index = random.sample(range(client_Num), num)
        self.first_weights, self.selected_weights, self.selected_loss, self.train_loss = [], [], [], []
        self.first_local_losses = loss
        self.first_local_weights = weights
        # Reset every client's backup weights to the current global model.
        for i, _ in enumerate(self.first_local_weights):
            self.first_local_weights[i] = global_weights
        # FedAvg over the (now uniform) weight list.
        self.global_weights = average_weights(self.first_local_weights)
        return self.sample_client_index, global_weights

    def client_ramdomly_selection_round(self, weights, loss, client_Num, num):
        """Later rounds: merge results, aggregate, then draw the next sample."""
        self.local_weights = self.first_local_weights
        self.local_losses = self.first_local_losses
        # Overwrite the backups for the clients that trained this round.
        for idx, new_w in enumerate(weights):
            slot = self.sample_client_index[idx]
            self.local_weights[slot] = new_w
            self.local_losses[slot] = loss[idx]
        # FedAvg over the full (updated) weight list.
        self.global_weights = average_weights(self.local_weights)
        # Average training loss over the clients that reported this round.
        self.gloabl_loss_avg = sum(loss) / len(loss)
        # Draw next round's participants.
        self.sample_client_index = random.sample(range(client_Num), num)
        return self.sample_client_index, self.global_weights, self.local_weights, self.gloabl_loss_avg