# -*- coding: utf-8 -*-
# Author: Jodven
# Creation Date: 2021/10/13
import copy
import time
import numpy as np
from tqdm import tqdm
import torch
from itertools import chain
import matplotlib.pyplot as plt

from options import args_parser
from dataset_processing import sampling, average_weights
from user_cluster_recommend import recommend, Oracle_recommend
from local_update import DQN_LocalUpdate, cache_hit_ratio, text_save
from model import SAE
from utils import exp_details, ModelManager, count_top_items
from Thompson_Sampling import thompson_sampling
from data_set import convert
from DDQN import DQN
import random
if __name__ == '__main__':

    # Record the wall-clock start time of the whole experiment.
    start_time = time.time()
    # Parse the CLI arguments and print the experiment configuration.
    args = args_parser()
    exp_details(args)
    # gpu or cpu
    # if args.gpu: torch.cuda.set_device(args.gpu)
    # device = 'cuda' if args.gpu else 'cpu'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load sample users_group_train users_group_test
    sample, users_group_train, users_group_test = sampling(args)
    data_set = np.array(sample)
    # A = data_set[:, 1:]
    # Flatten every client's test indices into one list and slice the
    # shared test set out of the full data array.
    test_dataset_idxs = []
    for idx in range(args.clients_num):
        test_dataset_idxs.append(users_group_test[idx])
    test_dataset_idxs = list(chain.from_iterable(test_dataset_idxs))
    test_dataset = data_set[test_dataset_idxs]
    # Learn user similarity with a stacked auto-encoder (SAE) to drive the
    # recommendation step later in the script.
    # global_model = SAE(int(max(data_set[:, 1])), 100)
    if args.dataset == 'Anime':
        # 34472 = item-id upper bound used for the Anime dataset; 100 hidden units.
        global_model = SAE(34472, 100)
    else:
        # BUG FIX: previously an unsupported dataset left global_model unbound
        # and the script crashed two lines later with an opaque NameError.
        # Fail fast with a clear message instead.
        raise ValueError(f"Unsupported dataset: {args.dataset!r} (expected 'Anime')")
    # Move the model to the chosen device and put it in training mode.
    global_model.to(device)
    global_model.train()
    print(global_model)

    # copy weights
    global_weights = global_model.state_dict()

    # Per-epoch store of the selected clients' linear1 weights
    # (filled inside the training loop below).
    selected_w_all_epochs = dict([(k, []) for k in range(args.epochs)])
    # T accumulates the total number of local iterations performed.
    T = 0
    Num_epoch = []
    # Training loss
    train_loss = []
    # Histories of cumulative reward and per-round reward.
    C_Reward, Reward, Distribution = [], [], []
    w_all_epochs = dict([(k, []) for k in range(args.epochs)])
    first_local_weights, first_local_losses = [], []
    # Alternative bootstrap: pick a random local-iteration count.
    #first_local_epoch = np.array(args.local_ep)[random.randint(1, len(args.local_ep))-1]
    # Instead, start with a very small local-iteration count.
    first_local_epoch = 1
    # Bootstrap round: every client trains locally once to produce the first
    # set of local weights and losses for the DQN to act on.
    for idx in range(args.clients_num):
        local_model = DQN_LocalUpdate(args=args, dataset=data_set, idxs=users_group_train[idx], local_epoch = first_local_epoch)
        w, loss = local_model.first_update_weights(model=copy.deepcopy(global_model), client_idx=idx + 1, global_round=0)
        first_local_weights.append(copy.deepcopy(w))
        first_local_losses.append(copy.deepcopy(loss))
    # Record the average loss of the bootstrap round.
    loss_avg = sum(first_local_losses) / len(first_local_losses)
    train_loss.append(loss_avg)
    # Remaining resource budget: every client paid first_local_epoch local
    # steps plus two global communications.
    remain_resouces = args.total_resources - args.clients_num * first_local_epoch * args.local_comsume - 2 * args.clients_num * args.global_comsume
    # Accumulate the total iteration count.
    T = T + first_local_epoch
    # DQN agent that selects clients and local-iteration counts each round.
    dqn = DQN()
    # Local weights kept per round; used at the end for movie recommendation.
    use_local_weights = dict([(k, []) for k in range(args.epochs - 1)])
    # Normalise the local-iteration count to [0, 1].
    ratio_first_local_epoch = first_local_epoch/(max(args.local_ep))
    # NOTE: the DQN returns the INDEX of the local-iteration count, not the count itself.
    sample_client_index, global_weights, num_epoch, C_reward, reward = dqn.first_client_epoch_selection_round(first_local_weights, first_local_losses, ratio_first_local_epoch, remain_resouces)

    # Record the per-round and cumulative rewards.
    C_Reward.append(C_reward)
    Reward.append(reward)
    # Map the returned index to the actual local-iteration count.
    local_epoch = np.array(args.local_ep)[num_epoch][0]
    # Track the local-iteration count chosen each round.
    Num_epoch.append(local_epoch)
    # Main federated loop: one global aggregation round per epoch.
    for epoch in tqdm(range(args.epochs)):
        # Weights and losses of the clients selected for this round.
        selected_local_weights, selected_local_losses = [], []
        # Start of the round.
        print(f'\n | Global Training Round : {epoch + 1} |\n')
        global_model.train()
        # Stop as soon as the resource budget is exhausted.
        if remain_resouces < 0:
           break
        else:
          for idx in range(len(sample_client_index)):
            Idx = sample_client_index[idx]
            local_model = DQN_LocalUpdate(args=args, dataset=data_set,
                                      idxs=users_group_train[Idx], local_epoch = local_epoch)
            w, loss = local_model.update_weights(
                model=copy.deepcopy(global_model), client_idx=Idx, global_round=epoch + 1)
            selected_local_weights.append(copy.deepcopy(w))
            selected_local_losses.append(copy.deepcopy(loss))
            selected_w_all_epochs[epoch].append(w['linear1.weight'].tolist())
        # Deduct this round's cost: local training plus two global
        # communications per selected client.
        remain_resouces = remain_resouces - len(sample_client_index) * local_epoch * args.local_comsume - 2 * len(sample_client_index) * args.global_comsume
        if remain_resouces > 0 and epoch < (args.epochs - 1):
            # Accumulate the total iteration count.
            T = T + local_epoch
            # Normalise the local-iteration count to [0, 1].
            ratio_local_epoch = local_epoch / (max(args.local_ep))
            # Let the DQN pick next round's clients, the aggregated global
            # weights, and the index of the next local-iteration count.
            sample_client_index, global_weights, local_weights, loss_avg, num_epoch, C_reward, reward = dqn.client_epoch_selection_round(selected_local_weights, selected_local_losses, ratio_local_epoch, remain_resouces)
            # Record the per-round and cumulative rewards.
            C_Reward.append(C_reward)
            Reward.append(reward)
            # Map the returned index to the actual local-iteration count.
            local_epoch = np.array(args.local_ep)[num_epoch][0]
            # Track the local-iteration count chosen each round.
            Num_epoch.append(local_epoch)
            for idx in range(args.clients_num):
                use_local_weights[epoch].append(local_weights[idx]['linear1.weight'].tolist())
            global_model.load_state_dict(global_weights)
            train_loss.append(loss_avg)
            print(f' \nAvg Training Stats after {epoch + 1} global rounds:')
            print(f'Training Loss : {np.mean(np.array(train_loss))}')

    # Average number of local iterations chosen per round.
    ave_epoch = sum(Num_epoch) / len(Num_epoch)
    print("平均本地迭代次数:", ave_epoch)
    # Truncate any previously saved data before writing the new loss history.
    open("AFPC_loss.txt", 'w').close()
    text_save('AFPC_loss.txt', train_loss)
    # Plot the training-loss curve.
    plt.figure(figsize=(6, 6))
    # Axis ranges.
    plt.xlim(0, 1)
    plt.ylim(0, 0.3)
    # BUG FIX: x1 was only assigned when len(train_loss) equalled epoch or
    # epoch + 1 and raised a NameError for any other length (possible after
    # an early resource break). Deriving the axis from len(train_loss)
    # reproduces both old branches and covers every case.
    x1 = np.linspace(0, len(train_loss) - 1, len(train_loss))
    plt.xticks(x1)
    plt.plot(x1, train_loss, 'o-', label="Train_Loss")
    plt.title('Loss vs. epoches')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()

    # Truncate any previously saved data before writing the per-round rewards.
    # (A fully commented-out duplicate of the C_Reward plot used to live here;
    # the live version follows below.)
    open("Reward.txt", 'w').close()
    text_save('Reward.txt', Reward)
    # Plot the per-round reward curve.
    plt.figure(figsize=(6, 6))
    # BUG FIX: x3 was only assigned when len(Reward) equalled epoch or
    # epoch + 1 and raised a NameError for any other length (possible after
    # an early resource break). Deriving the axis from len(Reward)
    # reproduces both old branches and covers every case.
    x3 = np.linspace(0, len(Reward) - 1, len(Reward))
    plt.xticks(x3)
    plt.plot(x3, Reward, 'o-', label="reward_value")
    plt.title('reward_value vs. epoches')
    plt.ylabel('reward_value')
    plt.legend()
    plt.show()


    # Truncate any previously saved data before writing the cumulative rewards.
    open("C_Reward.txt", 'w').close()
    text_save('C_Reward.txt', C_Reward)
    # Plot the cumulative reward curve.
    plt.figure(figsize=(6, 6))
    # BUG FIX: x4 was only assigned for the two expected lengths of C_Reward
    # and raised a NameError otherwise; computing it directly from
    # len(C_Reward) reproduces both old branches and covers every case.
    x4 = np.linspace(0, len(C_Reward) - 1, len(C_Reward))
    plt.xticks(x4)
    plt.plot(x4, C_Reward, 'o-', label="reward_value")
    plt.title('reward_value vs. epoches')
    plt.ylabel('reward_value')
    plt.legend()
    plt.show()

    # Caching size
    cachesize = args.cachesize
    # Recommend movies
    # FPCC / Oracle / m-e-greedy
    # dictionary index: client idx
    recommend_movies = dict([(k, []) for k in range(args.clients_num)])
    Oracle_recommend_movies = dict([(k, []) for k in cachesize])
    TS_recommend_movies = dict([(k, []) for k in cachesize])
    # cache efficiency
    # FPCC / random caching / Oracle caching / m-e-greedy / Thompson Sampling
    cache_efficiency = np.zeros(len(cachesize))
    random_cache_efficiency = np.zeros(len(cachesize))
    Oracle_cache_efficiency = np.zeros(len(cachesize))
    Greedy_cache_efficiency = np.zeros(len(cachesize))
    TS_cache_efficiency = np.zeros(len(cachesize))

    # algorithm  parameters
    # m-ε-greedy ε represents the probability to select files randomly from all the files.
    e = 0.3

    print('\n Caching Efficiency vs Cachesize')
    # recommend movies
    # Single pass using the already-trained AFPC weights.
    # for idx in range(len(sample_client_index)):
    for idx in range(args.clients_num):
        # Idx = sample_client_index[idx]
        # test_dataset_i = data_set[users_group_test[Idx]]
        test_dataset_i = data_set[users_group_test[idx]]
        user_movie_i = convert(test_dataset_i, 34472)
        # recommend_movies[idx] = recommend(user_movie_i, test_dataset_i, selected_w_all_epochs[args.epochs - 1][idx])
        # NOTE(review): epoch - 2 assumes at least two completed rounds filled
        # use_local_weights before the loop ended — verify against the loop above.
        recommend_movies[idx] = recommend(user_movie_i, test_dataset_i, use_local_weights[epoch-2][idx])

        for c in cachesize:
            Oracle_recommend_movies[c].append(list(Oracle_recommend(test_dataset_i, c)))

    # cache hit ratio
    for i in range(len(cachesize)):
        c = cachesize[i]
        # FPCC
        all_list = []
        # for idx in range(len(sample_client_index)):
        for idx in range(args.clients_num):
            # Idx = sample_client_index[idx]
            recommend_movies_c = count_top_items(c, recommend_movies[idx])
            all_list.append(list(recommend_movies_c))
        recommend_movies_c = count_top_items(c, all_list)
        # Print the movies chosen for caching.
        print(f' \nThe selected {c} caching movies after {epoch} global rounds:')
        # print(f' \nThe selected {c} caching movies after {args.epochs} global rounds:')
        print(recommend_movies_c)
        cache_efficiency[i] = cache_hit_ratio(test_dataset, recommend_movies_c)
        # print(f' \nThe Cache Hit Ratio with cachesize {c} after {args.epochs} global rounds:')
        print(f' \nThe Cache Hit Ratio with cachesize {c} after {epoch} global rounds:')
        print(f'Cache Hit Ratio : {cache_efficiency[i]}')
        # random caching
        random_caching_movies = list(np.random.choice(range(1, 34472 + 1), c, replace=False))
        random_cache_efficiency[i] = cache_hit_ratio(test_dataset, random_caching_movies)
        # Oracle
        Oracle_recommend_movies[c] = count_top_items(c, Oracle_recommend_movies[c])
        Oracle_cache_efficiency[i] = cache_hit_ratio(test_dataset, Oracle_recommend_movies[c])
        # Thompson Sampling
        TS_recommend_movies[c] = thompson_sampling(args, data_set, test_dataset, c)
        TS_cache_efficiency[i] = cache_hit_ratio(test_dataset, TS_recommend_movies[c])

    # m-e-greedy: mix of Oracle (prob 1 - e) and random (prob e) hit ratios.
    Greedy_cache_efficiency = Oracle_cache_efficiency * (1 - e) + random_cache_efficiency * e


    # Persist each strategy's cache-efficiency curve. Opening with mode 'w'
    # and closing immediately truncates any previously saved data before
    # text_save writes the fresh values.
    # (Replaces five copy-pasted truncate-then-save stanzas, each of which
    # also bound the useless name `file` to None.)
    for fname, efficiency in [
        ('Oracle.txt', Oracle_cache_efficiency),
        ('AFPC.txt', cache_efficiency),
        ('Greedy.txt', Greedy_cache_efficiency),
        ('TS.txt', TS_cache_efficiency),
        ('random.txt', random_cache_efficiency),
    ]:
        open(fname, 'w').close()
        text_save(fname, efficiency.tolist())

    # Plot the cache hit ratio of every strategy against cache size.
    plt.figure(figsize=(6, 6))
    # Axis ranges and labels.
    plt.xlim(50 - 5, 400 + 5)
    plt.ylim(0, 80)
    # plt.ylim(0, 90)
    plt.xlabel('Cache Size')
    plt.ylabel('Cache Efficiency')
    plt.title('Cache Efficiency vs Cache Size')
    # Oracle Caching
    plt.plot(cachesize, Oracle_cache_efficiency, color='blue', linewidth=1.5, linestyle='-', label='Oracle')
    plt.scatter(cachesize, Oracle_cache_efficiency, s=50, marker='^', color='blue')
    # FPCC
    plt.plot(cachesize, cache_efficiency, color='red', linewidth=1.5, linestyle='-', label='FPCC')
    plt.scatter(cachesize, cache_efficiency, s=50, marker='o', color='red')
    # m-ε-greedy
    plt.plot(cachesize, Greedy_cache_efficiency, color='green', linewidth=1.5, linestyle='-', label='m-ε-greedy')
    plt.scatter(cachesize, Greedy_cache_efficiency, s=50, marker='*', color='green')
    # Thompson Sampling
    plt.plot(cachesize, TS_cache_efficiency, color='purple', linewidth=1.5, linestyle='-', label='Thompson Sampling')
    plt.scatter(cachesize, TS_cache_efficiency, s=50, marker='x', color='purple')
    # Random Caching
    plt.plot(cachesize, random_cache_efficiency, color='yellow', linewidth=1.5, linestyle='-', label='Random')
    plt.scatter(cachesize, random_cache_efficiency, s=50, marker='v', color='yellow')
    plt.legend()
    # plt.savefig(f"./save/{args.dataset}-CachingEfficiency.png")
    plt.show()

    # Disabled experiment kept for reference: cache efficiency at cachesize 50
    # versus communication rounds.
    # # plt cachesize 50 cache_efficiency vs communication rounds
    # print('\n Caching Efficiency vs Communication Rounds')
    # # recommend_movies_c50 = dict([(k, []) for k in np.arange(1, args.epochs+1)])
    # # cache_efficiency_c50 = np.zeros(args.epochs + 1)
    # # for global_round in np.arange(1, args.epochs+1):
    # recommend_movies_c50 = dict([(k, []) for k in np.arange(1, epoch + 1)])
    # cache_efficiency_c50 = np.zeros(epoch + 1)
    # for global_round in np.arange(1, epoch + 1):
    #     # for idx in range(args.clients_num):  # used when DQN is disabled
    #     for idx in range(len(sample_client_index)):  # used when DQN is enabled
    #         # Idx = sample_client_index[idx]  # used when DQN is enabled
    #         # test_dataset_i = data_set[users_group_test[Idx]]
    #         test_dataset_i = data_set[users_group_test[idx]]
    #         if args.dataset == 'Anime':
    #             user_movie_i = convert(test_dataset_i, 34473)
    #         # recommend_list = recommend(user_movie_i, test_dataset_i, w_all_epochs[global_round-1][idx])
    #         recommend_list = recommend(user_movie_i, test_dataset_i, use_local_weights[epoch-2][idx])
    #         recommend_list = count_top_items(50, recommend_list)
    #         recommend_movies_c50[global_round].append(list(recommend_list))
    #
    #     # FPCC
    #     recommend_movies_c50[global_round] = count_top_items(50, recommend_movies_c50[global_round])
    #     # Print the movies chosen for caching.
    #     # print(f' \nThe selected 50 caching movies after {global_round} global rounds:')
    #     # print(recommend_movies_c50[global_round])
    #     cache_efficiency_c50[global_round] = cache_hit_ratio(test_dataset, recommend_movies_c50[global_round])
    #     print(f' \nThe Cache Hit Ratio with cachesize 50 after {global_round} global rounds:')
    #     print(f'Cache Hit Ratio : {cache_efficiency_c50[global_round]}')
    #
    # # plt cache efficiency
    # plt.figure(figsize=(6, 6))
    # # Axis ranges and labels.
    # plt.xlim(0, 10)
    # plt.ylim(0, 20)
    # plt.xlabel('Communication Round')
    # plt.ylabel('Cache Efficiency')
    # plt.title('Cache Efficiency vs Communication Round')
    # # FPCC
    # # plt.plot(range(args.epochs+1), cache_efficiency_c50, color='red', linewidth=1.5, linestyle='-', label='FPCC')
    # # plt.scatter(range(args.epochs+1), cache_efficiency_c50, s=50, marker='o', color='red')
    # plt.plot(range(epoch+1), cache_efficiency_c50, color='red', linewidth=1.5, linestyle='-', label='FPCC')
    # plt.scatter(range(epoch+1), cache_efficiency_c50, s=50, marker='o', color='red')
    # plt.legend()
    # # plt.savefig(f"./save/{args.dataset}-CacheEfficiency_CommunicationRound.png")
    # plt.show()

    # Report the total wall-clock run time of the experiment.
    print('\n Total Run Time: {0:0.4f}'.format(time.time() - start_time))
