import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset_processing import average_weights
import random
from options import args_parser
from pca2 import pca
from local_update import text_save

args = args_parser()

# ---- DQN hyper-parameters shared by both federation scales ----
BATCH_SIZE = 2
LR = 0.1                    # learning rate
EPSILON = 0.9               # probability of taking the greedy (best) action
GAMMA = 0.9                 # reward discount factor
TARGET_REPLACE_ITER = 1     # how often the target Q-network is refreshed
epoch_MEMORY_CAPACITY = 5   # replay-memory size for the local-epoch agent
epoch_N_ACTIONS = 5         # number of selectable local-epoch counts
epoch_Opt_actions = 1       # pick only the single best local-epoch action

# ---- Scale-dependent settings (10-client vs 100-client federation) ----
if args.clients_num == 10:
    client_MEMORY_CAPACITY = 5   # replay-memory size for the client-selection agent
    client_N_ACTIONS = 10        # number of selectable devices (clients)
    # State size: (clients + 1 global model) * n_components PCA dims,
    # plus one entry for the local-epoch count -> 11*10 + 1 = 111.
    N_STATES = 111
    client_Opt_actions = 5       # select the top-5 devices
    n_components = 10            # PCA target dimensionality
elif args.clients_num == 100:
    client_MEMORY_CAPACITY = 1
    client_N_ACTIONS = 100
    # (100 + 1) * 100 + 1 = 10101, same layout as the 10-client case.
    N_STATES = 10101
    client_Opt_actions = 10      # select the top-10 devices
    n_components = 100
else:
    # Fail fast: previously an unsupported value left every constant
    # undefined and crashed later with an opaque NameError.
    raise ValueError(
        f"Unsupported clients_num: {args.clients_num} (expected 10 or 100)"
    )

def choose_client_action(self, x):
    """Epsilon-greedy selection of the top-k clients for this round.

    With probability EPSILON, rank all clients by the eval-net's Q-values
    and keep the best ``client_Opt_actions``; otherwise sample that many
    distinct clients uniformly at random.

    Args:
        x: current state vector (PCA-flattened weights plus the local-epoch
           count); anything convertible to a 1-D float tensor.

    Returns:
        list[int]: indices of the chosen clients.
    """
    self.current_state = x
    # Add a batch dimension and cast to float64 to match the eval net.
    state = torch.unsqueeze(torch.FloatTensor(x), 0).to(torch.float64)
    if np.random.uniform() < EPSILON:
        # Exploit: score every client. Inference only — no autograd graph.
        # Call the module directly instead of .forward() so hooks still fire.
        with torch.no_grad():
            actions_value = self.client_eval_net(state)
        _, indices = actions_value.sort(descending=True)
        actions = indices[0][:client_Opt_actions].tolist()
        print(actions_value)
        print("DDQN选择:", actions)
    else:
        # Explore: k distinct clients, uniformly at random.
        actions = random.sample(range(client_N_ACTIONS), client_Opt_actions)
        print("随机选择:", actions)
    # Return the list of selected client indices.
    return actions


def choose_local_epoch_action(self, x):
    """Epsilon-greedy selection of the local-epoch count.

    Mirrors ``choose_client_action`` but queries the epoch eval-net and
    keeps the single best (``epoch_Opt_actions``) action.

    Args:
        x: current state vector; anything convertible to a 1-D float tensor.

    Returns:
        list[int]: the chosen local-epoch action index (length 1).
    """
    self.current_state = x
    # Add a batch dimension and cast to float64 to match the eval net.
    state = torch.unsqueeze(torch.FloatTensor(x), 0).to(torch.float64)
    if np.random.uniform() < EPSILON:
        # Exploit: inference only — no autograd graph. Call the module
        # directly instead of .forward() so hooks still fire.
        with torch.no_grad():
            actions_value = self.epoch_eval_net(state)
        _, indices = actions_value.sort(descending=True)
        actions = indices[0][:epoch_Opt_actions].tolist()
        print(actions_value)
        print("DDQN选择:", actions)
    else:
        # Explore: sample a local-epoch action uniformly at random.
        actions = random.sample(range(epoch_N_ACTIONS), epoch_Opt_actions)
        print("随机选择:", actions)
    return actions



def first_client_epoch_selection_round(self, weights, loss, local_epoch, first_remain_resouces):
    """First federated round: build the DRL state from all clients' weights,
    pick a client subset and a local-epoch count, and compute the reward.

    Args:
        weights: list of per-client model state dicts (local parameters).
            NOTE(review): this list is mutated in place below (the global
            weights are appended) — confirm callers expect that.
        loss: list of per-client training losses, aligned with ``weights``.
        local_epoch: local-epoch count of this round; appended to the state.
        first_remain_resouces: remaining resource budget after this round.

    Returns:
        tuple: (selected client indices, global weights dict, selected
        local-epoch action, cumulative reward ``self.R``, this round's reward).
    """
    self.i = 1
    self.first_remain_resouces = first_remain_resouces
    self.first_weights, self.selected_weights, self.selected_loss, self.train_loss = [], [], [], []
    self.first_local_losses = loss
    self.first_local_weights = weights
    self.total_client_index = []    # overall list of client index numbers
    # FedAvg: average all participating devices' local weights into the
    # first global model.
    first_global_weights = average_weights(self.first_local_weights)
    # first_local_global_weights[idx] is device idx's state dict: keys are
    # parameter names, values are parameter tensors.
    # NOTE(review): alias, not a copy — the append below also mutates
    # self.first_local_weights and the caller's ``weights`` list.
    first_local_global_weights = self.first_local_weights
    # State inputs = every local model's weights + the global weights.
    first_local_global_weights.append(first_global_weights)
    # Record each model's parameters as (name, tensor) pairs.
    for idx in range(len(first_local_global_weights)):
        key_list = list(first_local_global_weights[idx].keys())
        # print('字典中的key转换为列表：', key_list)
        value_list = list(first_local_global_weights[idx].values())
        # print('字典中的value转换为列表：', value_list)
        key_value_to_list = list(zip(key_list, value_list))  # (name, tensor) pairs
        # print('key列表和value列表合并：', key_value_to_list)
        # Accumulate into first_weights (one entry per model).
        self.first_weights.append(key_value_to_list)
    # Flatten each model's parameters into a single vector.
    weight_vecs = [flatten_weights(weight) for weight in self.first_weights]
    # PCA-reduce every weight vector to the configured dimensionality.
    # self.first_weights = pca(self.first_weights, n_components)
    self.first_weights = pca(weight_vecs, n_components)  # reduce weights to 10 or 100 dims
    # gc.collect()
    # Flatten the PCA output into a single 1-D state list.
    self.first_weights = self.first_weights.reshape(-1).tolist()
    # self.first_weights = trans_torch(self.first_weights).reshape(-1)
    # Append the local-epoch count as the final state component.
    self.first_weights.append(local_epoch)
    # Convert the state to a tensor (trans_torch defined elsewhere).
    self.first_weights = trans_torch(self.first_weights)

    # Given the state (first_weights), score every client action and keep
    # the top-valued subset.
    self.sample_client_index = self.choose_client_action(self.first_weights)
    # Likewise pick the highest-valued local-epoch action.
    self.sample_local_epoch = self.choose_local_epoch_action(self.first_weights)

    # Store the state tensor as the current DRL state.
    self.current_state = self.first_weights
    # Collect the weights and losses of the selected devices.
    for idx in range(len(self.sample_client_index)):
        self.selected_weights.append(self.first_local_weights[self.sample_client_index[idx]])
        self.selected_loss.append(self.first_local_losses[self.sample_client_index[idx]])
        # self.unsample_client_index.remove(self.sample_client_index[idx])    # track unselected indices
    # Global model: here the average over ALL devices is used (the
    # selected-devices-only alternative is kept commented out).
    # self.global_weights = average_weights(self.selected_weights)
    self.global_weights = first_global_weights
    # Global loss: average over the SELECTED devices only (the all-devices
    # alternative is kept commented out).
    # NOTE(review): attribute name typo 'gloabl' kept for compatibility.
    self.gloabl_loss_avg = sum(self.selected_loss) / len(self.selected_loss)
    # self.gloabl_loss_avg = sum(self.first_local_losses) / len(self.first_local_losses)
    print(f'Training Loss : {np.mean(np.array(self.gloabl_loss_avg))}')
    self.train_loss.append(self.gloabl_loss_avg)

    # Reward: corresponds to Eq. (19) of the paper this code implements.
    # lamda_T weights accuracy vs. resource consumption as resources deplete.
    lamda_T = 0.6 + 0.4 * (args.total_resources - self.first_remain_resouces) / args.total_resources
    Omuga_T = lamda_T * (1-self.gloabl_loss_avg) / self.target_accuracy + (1 - lamda_T) * self.first_remain_resouces / args.total_resources - 1
    self.reward = pow(64, Omuga_T)

    # self.reward = (pow(64,((1-self.gloabl_loss_avg)-self.target_accuracy)) - 1)*10
    print("当前奖励值:", self.reward)
    # Discounted cumulative reward; with self.i == 1 the discount is GAMMA**0.
    self.R = self.R + pow(GAMMA,(self.i-1))*self.reward
    # self.R = GAMMA * self.R + self.reward
    print("累计奖励值:", self.R)
    # Current state is available as self.current_state.
    # s = self.current_state
    # Return: selected client indices, global weights, selected local-epoch
    # count, cumulative reward, and this round's reward.
    return self.sample_client_index, self.global_weights, self.sample_local_epoch, self.R, self.reward
