import datetime
import os
import time
import numpy as np
import random
import sys
import torch
import matplotlib.dates as mdates
import tqdm
import pickle
sys.path.append(r'E:\项目\rf_computing')
from env.network_playground_releasev1 import NetworkEnv
from algorithm.DQN import DQN
import matplotlib.pyplot as plt
from config.DQNConfig import DQNConfig
from config.ENVConfig import ENVConfig
from config.TRAINConfig import TrainConfig
from baseline import optimal_time_matching
import matplotlib as mpl
import logging
mpl.rcParams['font.family'] = 'STKAITI'  # CJK-capable font for plot labels
plt.rcParams['axes.unicode_minus'] = False   # render minus signs correctly under a CJK font

# Module-level logger writing DEBUG-and-above records to a daily log file.
logger = logging.getLogger('my_app')
logger.setLevel(logging.DEBUG)
# Log output path (one file per day).
date = datetime.datetime.now().strftime('%Y%m%d')
log_path = f'./log/dqnv1_{date}.log'
# BUG FIX: logging.FileHandler raises FileNotFoundError if './log' does not
# exist yet — create it up front.
os.makedirs('./log', exist_ok=True)
file_handler = logging.FileHandler(log_path,  encoding='utf-8')
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
# Attach the file handler to the logger.
logger.addHandler(file_handler)
# Two agents: one main (edge-group level) agent; all sub (in-group) schedulers
# share a single sub agent.


def env_agent_config(*k):
    """Build the network environment and the two DQN agents.

    Args:
        *k: positional arguments; only ``k[0]`` is used — the task-generation
            speed tag embedded in the visdom window names.

    Returns:
        tuple ``(nenv, agent_main, agent_sub, cfg)`` where ``agent_main``
        selects an edge group and ``agent_sub`` selects a node inside a group.
    """
    # Initialise the network environment.
    cfg = ENVConfig(visenv=f'DQN_g_spedd_{k[0]}')
    nenv = NetworkEnv(cfg)

    # Main agent: state = 4 features per edge + 2, one action per edge group.
    main_cfg = DQNConfig(
        n_states=cfg.edge_num*4+2,
        n_actions=cfg.edge_num,
        hidden_dim=256,
        lr=0.0001,
        buffer_size=int(2e5),
        main_win=f'DQN_maintrain_gspeed{k[0]}',
    )
    print(main_cfg.__dict__)

    # Sub agent: state = 4 features per in-group node + 2, one action per node.
    sub_cfg = DQNConfig(
        n_states=cfg.nodes_of_edge*4+2,
        n_actions=cfg.nodes_of_edge,
        hidden_dim=256,
        lr=0.0002,
        buffer_size=int(2e5),
        sub_win=f'DQN_subtrain_gspeed{k[0]}',
    )

    agent_main = DQN(main_cfg)
    agent_sub = DQN(sub_cfg)

    n_states = nenv.observation_space.shape[0]
    n_actions = nenv.action_space.n
    print(f"0级环境状态空间维度：{n_states}，动作空间维度：{n_actions}")
    print(nenv.info())

    return nenv,agent_main, agent_sub,cfg

def train_test(cfg,nenv,agent_main, agent_sub):
    """Run training ('TN') or testing ('TS') episodes on the network environment.

    Each episode interleaves: (1) task generation by random users, (2) global
    dispatch — the main agent (or a baseline policy) assigns each queued task
    to an edge group, (3) in-group dispatch — the sub agent assigns tasks to a
    worker node, (4) one tick of task processing on every worker.  Transitions
    are cached in temporary buffers and their reward slot is filled in lazily
    when the corresponding task finishes; only then are they pushed into the
    agents' replay buffers (training mode only).

    Args:
        cfg: run configuration; cfg.MODE selects 'TN' (train) / 'TS' (test),
            cfg.policy selects 'agent' / 'random' / 'optimal_time_matching'.
        nenv: the NetworkEnv instance.
        agent_main: DQN agent choosing an edge group.
        agent_sub: DQN agent choosing a node inside a group.

    Returns:
        (rewards, agent_main, agent_sub, av_task_speed, delay_rate,
        all_av_taskdelay, all_cost).  The four scalar statistics come from
        the LAST episode only.
    """
    totalStep =cfg.totalStep
    GenerateTaskSpeed = cfg.GenerateTaskSpeed
    policy = cfg.policy
    MODE = cfg.MODE
    if MODE=='TN':
        eps =cfg.train_eps
    elif MODE=='TS':
        eps = cfg.test_eps

    print(f'policy:{policy},mode:{MODE},i_eps:{eps}')
    random.seed(cfg.random_seed)
    rewards = []       # per-episode sub-level return (ep_return)
    main_rewards = []  # per-episode main-level return (main_return)


    cfg_str = '\n'.join([f"{key}: {value}" for key, value in cfg.__dict__.items()])
    print("Parsed arguments:\n" + cfg_str)
    logger.info("Parsed arguments:\n" + cfg_str)
    for i_ep in tqdm.tqdm(range(eps)):
        taskID = cfg.TaskInitID
        tempbuffer ={}  # temporary buffer: taskid -> [s, a, r, s', done] awaiting its real reward
        tempbuffer_work1 = {}  # same, but for the in-group (sub-agent) transitions
        ep_return = 0
        main_return=0
        delay_num = 0
        tasks_speed = 0
        sys_time = 0

        TASK_SPLIT = cfg.split # whether splittable tasks are divided into sub-tasks
        SPLIT_NUM=cfg.split_num  # number of sub-tasks a split task produces
        render = cfg.render
        split_tasks_info = {}
        print(f'{i_ep+1}/{eps}')
        nenv.reset()

        for step in range(int(totalStep)):  # one simulated time step per iteration
            if render:
                if i_ep%50==0:
                    nenv.render(i_ep,cfg.policy)
            if step % GenerateTaskSpeed == 0:
                if MODE=='TN':
                    GenerateTaskSpeed = random.randint(1,5)
                for i in range(len(nenv.user_list)):
                    random_user = random.choice(nenv.user_list)  # pick a user at random
                    if cfg.MODE=='TN':
                        gmd = 'Train'
                    elif cfg.MODE=='TS':
                        gmd='Test'
                    task = random_user.generate_task_v1(taskID,gmode=gmd)
                    if TASK_SPLIT and task.splitbale==1:
                        Computing_Re = task.computing_re
                        Bandwidth_Re = task.bandwidth_re
                        split_tasks_info[taskID] = [Computing_Re,Bandwidth_Re,task.td,sys_time,0,0]  # [computing demand, bandwidth demand, deadline, creation time, finished-sub-task count (0), turnaround time (0)]

                        for i in range(SPLIT_NUM):
                            cpt_r2 = Computing_Re/SPLIT_NUM
                            bdw_r2 = Bandwidth_Re/SPLIT_NUM
                            taskid2 = str(taskID)+'-'+str(i+1) # sub-task id as string "parentID-k"
                            task = random_user.generate_split_task(cpt_r2,bdw_r2, taskid2)


                            nenv.add_task(task)
                    else:
                        nenv.add_task(task)
                    taskID += 1


            if nenv.task_queue.qsize()>0:  # dispatch while the global queue is non-empty
                task_len = nenv.task_queue.qsize()

                for i in range(task_len):  # assign every queued task in turn
                    # print(f'assigning task {i+1}/{task_len}')
                    state = nenv.get_state()
                    # print(f'global state: {state}')
                    if policy=='agent'and MODE=='TN':
                        action = agent_main.sample_action(state)  # exploratory action
                    elif policy=='agent'and MODE=='TS':
                        action = agent_main.predict_action(state)  # greedy action
                    elif policy=='random':
                        _nodes = cfg.edge_num
                        action = random.randint(0, _nodes-1)
                    elif policy=='optimal_time_matching':

                        action = optimal_time_matching(state)

                    state_next,r,done,tid = nenv.step1(action)  # global-level reward (replaced later)
                    # if ep_step==9990 and i ==task_len-1:
                    #     print('last assignment of the episode')
                    #     done = True
                    tempbuffer[tid]=[state,action,r,state_next,done]


            for group_i in range(len(nenv.work_groups)):  # in-group task assignment
                if nenv.task_queue_edge_list[group_i].qsize()>0:
                    task1_len = nenv.task_queue_edge_list[group_i].qsize()
                    for i in range(task1_len):
                        gstate = nenv.get_group_state(group_i)
                        # print(f'group {group_i} state',gstate)
                        if policy == 'agent' and MODE == 'TN':
                            action = agent_sub.sample_action(gstate)  # exploratory action
                        elif policy == 'agent' and MODE == 'TS':
                            action = agent_sub.predict_action(gstate)  # greedy action
                        elif policy == 'random':
                            _nodes = cfg.nodes_of_edge
                            action = random.randint(0, _nodes - 1)
                        elif policy == 'optimal_time_matching':

                            action = optimal_time_matching(gstate)

                        state_next, r, done, taskid = nenv.stepbyworker1(group_i,action)   # in-group reward (replaced later)

                        tempbuffer_work1[taskid] =[gstate,action,r,state_next,done]
                    if render:
                        for i in range(int(len(state_next[2:])/4)):
                            nenv.cl[group_i][i].append(state_next[2+4*i+2])  # per-node compute load
                            nenv.bl[group_i][i].append(state_next[2+4*i+3])# per-node bandwidth load

            sys_time += 1
            for group_i in  range(len(nenv.work_groups)) : # process tasks on every worker
                for j,worker in enumerate(nenv.work_groups[group_i]):
                    worker.process_computing_task()
                    task_done = worker.process_transmit_task()
                    if len(task_done)>0:
                        for done in task_done:
                            taskid, task_speed, delay, reward2, alpha = done[0], done[1], done[2], done[3], done[4]

                            sasrd_w1 = tempbuffer_work1[taskid]
                            # laod_std = nenv.cal_std()
                            beta = 10  # load-matching weight (unused while std_r stays 0)
                            std_r = 0
                            # if alpha != 1:  # bonus when assigned to a terminal node
                            #     std_r =  (beta * laod_std[group_i][j-1])
                                # print(reward2,std_r)
                            sasrd_w1[2] = reward2 + std_r  # fill in the real sub-level reward

                            # print(reward2, nenv.cal_std())
                            sasrd_main = tempbuffer[taskid]
                            sasrd_main[2] = task_speed  # main-level reward is the task speed
                            if policy == 'agent' and MODE=='TN':
                                agent_sub.memory.push(tuple(sasrd_w1))
                                agent_main.memory.push(tuple(sasrd_main))  # push into the main agent's replay buffer
                            if '-' in str(taskid):  # sub-task of a split task ("parent-k")
                                tid = taskid.split('-')[0]
                                tid = int(tid)
                                split_tasks_info[tid][4]+=1
                                # print(f'tid:{tid} finished sub-task {split_tasks_info[tid][4]}/2')
                                tt = sys_time-split_tasks_info[tid][3]
                                if tt>split_tasks_info[tid][5]: # turnaround time = slowest sub-task
                                    split_tasks_info[tid][5]=tt

                                tasks_speed+=0  # split tasks are credited later, once all parts finish
                            else:
                                tasks_speed+=task_speed
                                delay_num += delay

                            ep_return += reward2
                            main_return+=task_speed

                            # print(f'pushed to buffer: {tuple(sasrd)}')
                if policy=='agent'and MODE=='TN':
                    agent_sub.update()

            if policy=='agent'and MODE=='TN':
                if step%1==0:  # throttles the main agent's update rate (currently every step)
                    agent_main.update()
        print('任务分配结束，处理未完成任务')
        # Drain phase: no new tasks; keep ticking until every compute and
        # transmit queue is empty.
        while 1:
            cqs = 0
            bqs = 0
            # for w in nenv.edge_list:
            #     _, _, cq, bq = w.get_all_node_state()
            #     cqs += cq
            #     bqs += bq
            for j, g in enumerate(nenv.work_groups):
                for i, w in enumerate(g):
                    cqs += len(w.task_process_list)
                    bqs += len(w.task_transmit_list)
            if cqs == 0 and bqs == 0:
                print('任务全部处理完成，回合结束')
                # for j, g in enumerate(nenv.work_groups):
                #     for i, w in enumerate(g):
                #         print(f'{i}/{j} compute queue length: {len(w.task_process_list)}')
                #         print(f'{i}/{j} transmit queue length: {len(w.task_transmit_list)}')
                break
            else:
                sys_time += 1
                for group_i in range(len(nenv.work_groups)):  # process tasks
                    for j, worker in enumerate(nenv.work_groups[group_i]):
                        worker.process_computing_task()
                        task_done = worker.process_transmit_task()
                        if len(task_done) > 0:
                            for done in task_done:
                                taskid, task_speed, delay, reward2, alpha = done[0], done[1], done[2], done[3], done[4]

                                sasrd_w1 = tempbuffer_work1[taskid]
                                # laod_std = nenv.cal_std()
                                beta = 2  # load-matching weight (unused while std_r stays 0)
                                std_r = 0
                                # if alpha != 1:  # bonus when assigned to a terminal node
                                #     std_r = (beta * laod_std[group_i][j - 1])
                                #     print(reward2,std_r)

                                # NOTE(review): '- std_r' here vs '+ std_r' in the
                                # in-episode path — equivalent while std_r is 0,
                                # but confirm intent before re-enabling std_r.
                                sasrd_w1[2] = reward2 - std_r
                                # print(reward2, nenv.cal_std())
                                sasrd_main = tempbuffer[taskid]
                                sasrd_main[2] = task_speed # used as the main-level reward
                                if policy == 'agent' and MODE=='TN':
                                    agent_sub.memory.push(tuple(sasrd_w1))
                                    agent_main.memory.push(tuple(sasrd_main))  # push into the main agent's replay buffer

                                if '-' in str(taskid):  # sub-task of a split task
                                    tid = taskid.split('-')[0]
                                    tid = int(tid)

                                    split_tasks_info[tid][4] += 1
                                    # print(f'tid:{tid} finished sub-task {split_tasks_info[tid][4]}/2')
                                    tt = sys_time - split_tasks_info[tid][3]

                                    if tt > split_tasks_info[tid][5]:  # turnaround time = slowest sub-task
                                        split_tasks_info[tid][5] = tt

                                    tasks_speed += 0  # credited later with the parent task
                                else:
                                    tasks_speed += task_speed
                                    delay_num += delay

                                ep_return += reward2
                                main_return+=task_speed


        if i_ep % 10 == 0:
            print('main buffer 大小：',len(agent_main.memory))
        task_num = taskID - cfg.TaskInitID  # tasks generated this episode
        all_task_delay = 0
        all_cost = 0
        av_edge_use_c = 0  # summed edge compute utilisation
        av_edge_use_t = 0  # summed edge transmit utilisation
        av_node_use_c = 0  # summed terminal-node compute utilisation
        av_node_use_t = 0  # summed terminal-node transmit utilisation
        for j, g in enumerate(nenv.work_groups):
            for i, w in enumerate(g):
                print(f'第{j}/{i}累计延时{w.task_delay_acc},计算负载率{1 - w.idle_c / sys_time},传输负载率{1 - w.idle_t / sys_time}')
                if w.nodeID % 100 == 0:  # nodeID multiples of 100 are treated as edge nodes

                    av_edge_use_c += 1 - w.idle_c / sys_time
                    av_edge_use_t += 1 - w.idle_t / sys_time
                else:

                    av_node_use_c += 1 - w.idle_c / sys_time
                    av_node_use_t += 1 - w.idle_t / sys_time
                all_task_delay += w.task_sysdelay
        # Weighted cost: edge utilisation weighted 3x, terminal nodes 1x.
        all_cost = (av_edge_use_c+av_edge_use_t)/cfg.edge_num*3+(av_node_use_c+av_node_use_t)/((cfg.nodes_of_edge-1)*cfg.edge_num)*1
        print(f'边缘节点平均计算负载率为：{av_edge_use_c/cfg.edge_num},传输负载率：{av_edge_use_t/cfg.edge_num}，终端节点平均计算负载率为：{av_node_use_c/((cfg.nodes_of_edge-1)*cfg.edge_num)},传输负载率：{av_node_use_t/((cfg.nodes_of_edge-1)*cfg.edge_num)}')
        logger.info(f'边缘节点平均计算负载率为：{av_edge_use_c/cfg.edge_num},传输负载率：{av_edge_use_t/cfg.edge_num}，终端节点平均计算负载率为：{av_node_use_c/((cfg.nodes_of_edge-1)*cfg.edge_num)},传输负载率：{av_node_use_t/((cfg.nodes_of_edge-1)*cfg.edge_num)}')
        print(f'拆分任务数为：{len(split_tasks_info)}')
        ttt=0
        for k,v in split_tasks_info.items(): # finalise split tasks
            ttt+=v[5]
            # print(k,v)
            assert v[4]==cfg.split_num  # every sub-task must have completed by now
            task_s = (v[0]+v[1])/v[5]  # effective speed = total demand / turnaround time
            tasks_speed +=task_s
            if v[5]>v[2]:  # turnaround exceeded the deadline
                # print(f'timed-out tid: {k}:{v}')
                delay_num+=1
        if len(split_tasks_info)>0:
            print(f'拆分任务平均时延：{ttt/len(split_tasks_info)}')
        all_task_delay+=ttt
        all_av_taskdelay = all_task_delay / task_num  # mean task delay
        av_task_speed = tasks_speed/task_num   # mean task speed
        delay_rate = delay_num/task_num  # timeout rate
        rewards.append(ep_return)   # RL episode return (sub level)
        main_rewards.append(main_return)



        now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        print(f"回合：{i_ep + 1}/{eps}，，split:{cfg.split}-{cfg.split_num}|std| policy:{policy}  总奖励：{ep_return:.2f}; 任务数:{task_num},平均任务速度：{av_task_speed:.2f},超时率：{delay_rate},平均任务时延：{all_av_taskdelay},总代价：{all_cost} mianEpislon：{agent_main.epsilon:.3f},subEpislon：{agent_sub.epsilon:.3f} Time:{now}")
        logger.info(f"回合：{i_ep + 1}/{eps}，split:{cfg.split}-{cfg.split_num}|std|  policy:{policy}  总奖励：{ep_return:.2f}; 任务数:{task_num},平均任务速度：{av_task_speed:.2f},超时率：{delay_rate},平均任务时延：{all_av_taskdelay},总代价：{all_cost} mianEpislon：{agent_main.epsilon:.3f},subEpislon：{agent_sub.epsilon:.3f} Time:{now}")
        nenv.info()

    dic = {}
    dic['DQNu_Rewards'] = main_rewards
    dic['DQNd_Rewards'] = rewards
    print(dic)
    # plot_rewards(cfg, 'dqndqn-1-5-20240215', dic)
    return rewards,agent_main,agent_sub,av_task_speed,delay_rate,all_av_taskdelay,all_cost

def smooth(data, weight=0.9):
    """Exponentially smooth a curve, like TensorBoard's smoothing slider.

    Args:
        data: non-empty sequence of numbers.
        weight: smoothing factor in [0, 1); larger values smooth more.

    Returns:
        A new list of the same length with the smoothed values.
    """
    out = []
    prev = data[0]
    for value in data:
        prev = prev * weight + (1 - weight) * value  # EMA update
        out.append(prev)
    return out

def plot_rewards(cfg, tag='train', **kw):
    """Plot one or more reward curves and save the figure as a PNG.

    Args:
        cfg: config providing ``cfg.policy`` and ``cfg.env_name`` for the title.
        tag: label used in the plot title and the output file name.
        **kw: mapping of curve-name -> sequence of values; each entry is
            drawn as one labelled line.
    """
    fig, ax = plt.subplots(1, 1, figsize=(10, 7))
    today = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    ax.set_title(f"{tag} curve of {cfg.policy} for {cfg.env_name} -T:{today}")
    ax.set_xlabel('epsiodes')
    for k, v in kw.items():
        ax.plot(v, label=k)
    ax.legend()
    # BUG FIX: savefig does not create missing directories — ensure the
    # output directory exists before writing.
    os.makedirs('../images', exist_ok=True)
    plt.savefig(f'../images/{tag}.png')
def train_agent():
    """Train the main and sub DQN agents with the 'agent' policy, pickle both
    trained agents to disk, and plot the resulting reward curve.

    Note: the original version looped over ``['agent', 'agent']`` but skipped
    the second entry, so it always ran exactly once — the dead loop and the
    unused ``agent_main_t``/``agent_sub_t`` temporaries have been removed.
    """
    dic = {}
    p = 'agent'
    g_speed = 5
    nenv, agent_main, agent_sub, cfg = env_agent_config(g_speed)
    cfg.GenerateTaskSpeed = g_speed
    cfg.policy = p
    cfg.MODE = 'TN'

    # train_test trains the agents it was given in place; we only need the
    # per-episode rewards from its return value.
    rewards, _, _, _, _, _, _ = train_test(cfg, nenv, agent_main, agent_sub)
    print('完成训练！')

    model_dir = 'E:\\项目\\rf_computing\\models\\'
    main_model_name = 'dqn_main_model_cont_speed1-5_task_v1_cost_0215_woutstd.pkl'
    sub_model_name = 'dqn_sub_model_cont_speed1-5_task_v1_cost_0215_woutstd.pkl'
    # Persist whole agent objects (networks + replay buffers) via pickle.
    with open(model_dir + main_model_name, 'wb') as f1:
        pickle.dump(agent_main, f1)
    with open(model_dir + sub_model_name, 'wb') as f2:
        pickle.dump(agent_sub, f2)

    print(f'保存模型{main_model_name}、{sub_model_name}，路径为：{model_dir}')
    dic[p] = rewards

    print(dic)
    plot_rewards(cfg, tag=f'g_speed_{g_speed}_debug_state_split', **dic)
def test_agent(main_model_name = 'dqn_main_model.pkl',sub_model_name = 'dqn_sub_model.pkl'):
    """Evaluate two saved agent pairs plus two baseline policies and draw a
    radar chart comparing four metrics per policy.

    Args:
        main_model_name / sub_model_name: NOTE — these parameters are
            immediately shadowed by the hard-coded model-file lists below
            and are therefore ignored; kept only for interface compatibility.
    """
    labels = np.array(['平均任务速度', '超时率', '平均任务时延','总代价'])
    stats_list = []
    legend_names = []  # one legend entry per row of stats_list
    g_speed = 5
    nenv, agent_main, agent_sub, cfg = env_agent_config(g_speed)
    model_dir = 'E:\\项目\\rf_computing\\models\\'
    main_model_name = ['dqn_main_model_dsc_task_v1_cost_0131_woutstd.pkl','dqn_main_model_cont_task_v1_cost_0131_woutstd.pkl']
    sub_model_name = ['dqn_sub_model_dsc_task_v1_cost_0131_woutstd.pkl','dqn_sub_model_cont_task_v1_cost_0131_woutstd.pkl']
    dic = {}
    cfg.MODE = 'TS'
    for i, p in enumerate(['agent', 'agent']):
        logger.info(main_model_name[i] + ' ' + sub_model_name[i] + '\n')
        print(main_model_name[i], ' ', sub_model_name[i])
        # SECURITY: pickle.load executes arbitrary code on load — only ever
        # load model files from a trusted source.
        with open(model_dir + main_model_name[i], 'rb') as f1:
            agent_main = pickle.load(f1)
        with open(model_dir + sub_model_name[i], 'rb') as f2:
            agent_sub = pickle.load(f2)
        cfg.policy = p

        reds, _, _, av_task_speed, delay_rate, all_av_taskdelay, all_cost = train_test(cfg, nenv, agent_main, agent_sub)
        stats_list.append(np.array([av_task_speed, delay_rate, all_av_taskdelay, all_cost]))
        legend_names.append(p + str(i))
        dic[p + str(i)] = reds

    for p in ['random', 'optimal_time_matching']:
        cfg.policy = p
        logger.info(f'policy:{p}')
        reds, _, _, av_task_speed, delay_rate, all_av_taskdelay, all_cost = train_test(cfg, nenv, agent_main, agent_sub)
        stats_list.append(np.array([av_task_speed, delay_rate, all_av_taskdelay, all_cost]))
        legend_names.append(p)
        dic[p] = reds
    print(dic)
    plot_rewards(cfg, tag=f'test_on_6_alo_v1_0122', **dic)

    # Radar chart: one axis per metric, one closed polygon per policy.
    num_vars = len(labels)
    angles = np.linspace(0, 2 * np.pi, num_vars, endpoint=False).tolist()
    angles += angles[:1]

    fig, ax = plt.subplots(subplot_kw=dict(polar=True), figsize=(8, 8))

    # BUG FIX: the original indexed a 2-element label list ('random',
    # 'optimal_time_matching') with up to 4 stats entries, raising
    # IndexError on the third polygon; use the per-run legend_names built
    # alongside stats_list instead.
    for i, stats in enumerate(stats_list):
        stats = np.concatenate((stats, [stats[0]]))  # close the polygon
        ax.fill(angles, stats, alpha=0.25, label=f' {legend_names[i]}')

    ax.set_yticklabels([])
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(labels)

    ax.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))

    plt.show()

    
    

if __name__=='__main__':
    # 'os' is already imported at the top of the file; the redundant local
    # import was removed.

    # Disable HTTP(S) proxies so local traffic (e.g. visdom) is not routed
    # through a proxy.
    os.environ['http_proxy'] = ''
    os.environ['https_proxy'] = ''
    train_agent()
    # test_agent()


