# -*- coding: UTF-8 -*-

import torch
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import argparse,copy

from replaybuffer import ReplayBuffer
from ppo_shuyuan_dcn_2 import PPO_discrete
from baselines import Baseline
import env5 as environment
import config
import warnings
from sklearn.exceptions import DataConversionWarning
# Ignore specific categories of warnings
warnings.filterwarnings("ignore", category=DataConversionWarning)
warnings.filterwarnings("ignore")
# gpus = [0]
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def pre_training(args1):
    """Collect expert transitions from the baseline policy for agent warm-up.

    For each task count in ``task_number`` this builds a small argparse
    namespace of baseline settings, runs the 'dep' baseline policy on a
    fresh environment, and gathers the transition tuples the baseline
    records.

    Args:
        args1: the main script's parsed hyperparameter namespace, forwarded
            to ``Baseline`` unchanged.

    Returns:
        list: transitions of the form (s, a, a_logprob, r, s_, dw, done)
        collected by the last baseline run; empty if ``task_number`` is
        empty (previously this raised ``NameError`` because the name was
        only bound inside the loop).
    """
    policy = ['dep']
    task_number = [config.TASK_NUM]

    # Bind up-front so the return is safe even if task_number is empty.
    pre_training_list = []

    for i in task_number:
        parser = argparse.ArgumentParser(description='baseline')
        parser.add_argument('--user', type=int, default=config.USER_NUM, metavar='T', help='user number')
        parser.add_argument('--generator', type=str, metavar='G', default='zipf')
        parser.add_argument('--seed', type=int, metavar='S', default=10)
        parser.add_argument("--steps", type=int, default=int(1e6), help=" Maximum number of training steps")
        parser.add_argument("--episodes", type=int, default=1, help=" Maximum number of epochs")
        parser.add_argument('--evaltype', type=str, metavar='E', default='task_number')
        parser.add_argument('--classification', type=str, metavar='C', default='heterogeneous')
        parser.add_argument('--task', type=int, default=i, metavar='T', help='task number')
        # NOTE(review): type=bool is an argparse trap ("--store False" would
        # be truthy); harmless while only the default is used.
        parser.add_argument('--store', type=bool, default=True, help='store the running results')
        # parse_known_args(): this inner parser does not know the outer
        # script's options (e.g. --note), so a plain parse_args() would
        # abort with "unrecognized arguments" whenever the script is run
        # with any CLI flag.
        args2, _ = parser.parse_known_args()

        env = environment.Env()
        baseline = Baseline(env, args2, args1)

        for po in policy:
            baseline.run_baseline(po)

        pre_training_list = baseline.pre_training_list

    return pre_training_list

def main(args, env_name, number,ver):
    """Train the PPO-discrete agent: expert warm-up, then on-policy episodes.

    Args:
        args: hyperparameter namespace built by the ``__main__`` parser;
            ``statefinal_dim`` is injected here before the agent is built.
        env_name: experiment name tag used in the output paths.
        number: run-number tag used in the output paths.
        ver: version tag; "dep" is appended to it when building the paths.

    Side effects: logs scalars to tensorboard under ``runs/out2000_lmj/``
    and saves checkpoints under ``save_module_1/`` every
    ``args.save_modulecheckpoint_freq`` epochs.
    """
    env = environment.Env()
    
    # Flattened state dimension fed to the networks.
    # NOTE(review): 731 looks like a per-node feature length — confirm
    # against the environment's observation layout.
    args.statefinal_dim = (config.EDGE_NODE_NUM+1) * 731 
    task_num = args.task_num
    # TODO: parameters below were placeholders slated for tuning
    # mid_dim = []
    # buffer_size = 10

    total_steps = 0  # Record the total steps during the training
    total_epochs = 0

    # Per-epoch, per-task averages accumulated over the whole run.
    rewards_list = []
    migratio_list = []
    total_download_time_list = []
    trans_list = []
    waiting_time_list = []
    total_download_size_list = []
    computation_delay_list = []
    arrange_delay_list = []
    scaling_delay_list = []
    backhaul_delay_list = []
    

    replay_buffer = ReplayBuffer(args)
    agent = PPO_discrete(args)

    path ='runs/out2000_lmj/env_{}_number_{}_ver_{}'.format(env_name, number, ver+"dep")
    writer = SummaryWriter(log_dir=path)

    # NOTE(review): save_dir is never created here — presumably
    # agent.save_model creates it, otherwise checkpointing will fail.
    save_dir = 'save_module_1/env_{}_number_{}_ver_{}/'.format(env_name, number, ver+"dep")
    
    # Expert transitions produced by the baseline policy (see pre_training).
    pre_training_list = pre_training(args)

    # Phase 1: warm-up on expert transitions using the dedicated update1()
    # (distinct from the on-policy update() used later).
    for i in pre_training_list:
        # Each entry: (state, action, action log-prob, reward, next state,
        # dw flag, done flag) — order inferred from replay_buffer.store usage.
        s = i[0]
        a = i[1]
        a_logprob = i[2]
        r = i[3]
        s_ = i[4]
        dw = i[5]
        done = i[6]
        replay_buffer.store(s, a, a_logprob, r, s_, dw, done)
        
        if replay_buffer.count == args.batch_size_pre_expert:
            print("update the parameters***")
            actor_loss,critic_loss = agent.update1(replay_buffer, total_steps)
            # Manual reset of the buffer cursor; store() presumably
            # increments count — TODO confirm ReplayBuffer contract.
            replay_buffer.count = 0
            
        
    # load saved checkpoint from file
    if args.resume:
        # NOTE(review): checkpoint name is hard-coded to epoch 200.
        saved_detailed_name = '{}.pth'.format(200)
        checkpoint_path = save_dir + saved_detailed_name
        epoch,seed = agent.load(checkpoint_path)
        total_epochs = epoch

    while total_epochs < args.episodes:
        # Re-seed every epoch with a fixed seed so episodes are comparable.
        args.seed = 10
        env.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        
        pretain_len = len(pre_training_list)
        
        # Phase 2a: while expert data remains, keep replaying its tail and
        # training with the regular PPO update().
        # NOTE(review): 128 is a magic chunk size, and the range replays
        # everything from (epoch+1)*128 to the end on every epoch, so late
        # entries are revisited many times — confirm this is intentional.
        if (total_epochs+1)*128 <= pretain_len: 
            print("pre-training **********")
            for item in range((total_epochs+1)*128, pretain_len):
                i = pre_training_list[item]
                s = i[0]
                a = i[1]
                a_logprob = i[2]
                r = i[3]
                s_ = i[4]
                dw = i[5]
                done = i[6]
                replay_buffer.store(s, a, a_logprob, r, s_, dw, done)
        
                if replay_buffer.count == args.batch_size:
                    print("update the parameters***")
                    actor_loss,critic_loss = agent.update(replay_buffer, total_steps)
                    replay_buffer.count = 0

        # Phase 2b: collect one on-policy episode.
        obs,_ = env.reset()

        # 'alg_shu' is the flattened algorithm state vector; 'next_uid'
        # identifies the next user/task to schedule — TODO confirm the
        # observation-dict schema against env5.
        s = obs['alg_shu']
        init_uid = obs['next_uid']
        
        episode_steps = 0

        # Per-episode accumulators; most are scaled by *1000 below
        # (presumably seconds -> milliseconds — confirm units).
        ep_reward = 0
        ep_migration = 0
        ep_total_download_time = 0
        ep_trans = 0
        ep_waiting_time = 0
        ep_total_download_size = 0
        ep_computation_delay = 0
        ep_arrange_delay = 0
        ep_scaling_delay = 0
        ep_backhaul_delay = 0
        done = False
        
        print("####### env2 ppo + dcn + dep    #########")
    
        
        while not done:

            a, a_logprob = agent.choose_action(s,init_uid,obs)  # Action and the corresponding log probability
            # action_con, action_dis, log_prob_con, log_prob_dis = agent.select_action(s,init_uid,obs,device)
            obs_, reward, info, done, details = env.step(a)
            s_ = obs_['alg_shu']
            init_uid = obs_['next_uid']
            obs = obs_
            # Environment reward is treated as a cost; the agent is trained
            # on the negated value.
            r = -reward

            # r = agent.rewardscaling.__call__(r)

            # details[0..8]: migration, download time, transmission, waiting
            # time, download size, computation / arrange / scaling / backhaul
            # delays — inferred from the accumulator names below.
            ep_reward += r * 1000
            ep_migration += details[0] * 1000
            ep_total_download_time += details[1]  * 1000
            ep_trans += details[2] * 1000
            ep_waiting_time += details[3] * 1000
            ep_total_download_size += details[4]
            ep_computation_delay += details[5] * 1000
            ep_arrange_delay += details[6] * 1000
            ep_scaling_delay += details[7] * 1000
            ep_backhaul_delay += details[8] * 1000

            # dw mirrors done exactly here; no distinction is made between a
            # true terminal state and a time-limit truncation.
            if done:
                dw = True
            else:
                dw = False

 
            replay_buffer.store(s, a, a_logprob, r, s_, dw, done)
    
            s = s_
            episode_steps += 1 

        
            if replay_buffer.count == args.batch_size:
                actor_loss,critic_loss = agent.update(replay_buffer, total_epochs)
                # NOTE(review): 'actiion_loss' tag is misspelled; kept as-is
                # because renaming would break existing tensorboard runs.
                writer.add_scalar('loss/actiion_loss',actor_loss,global_step=total_epochs)
                writer.add_scalar('loss/critic_loss',critic_loss,global_step=total_epochs)

                replay_buffer.count = 0

            total_steps += 1

        if total_epochs % args.save_modulecheckpoint_freq == 0:
            save_name = 'epoch_{}.pth'.format(total_epochs)
            save_path = save_dir + save_name
            agent.save_model(total_epochs,save_path)
        
        # Per-task averages for logging.
        # NOTE(review): copy.copy on plain numbers is a no-op.
        tmp_r = copy.copy(ep_reward)/task_num
        tmp_migration = copy.copy(ep_migration)/task_num
        tmp_total_download_time = copy.copy(ep_total_download_time)/task_num
        tmp_trans = copy.copy(ep_trans)/task_num
        tmp_waiting_time = copy.copy(ep_waiting_time)/task_num
        tmp_total_download_size = copy.copy(ep_total_download_size) / task_num
        tmp_computation_delay = copy.copy(ep_computation_delay) / task_num
        tmp_arrange_delay = copy.copy(ep_arrange_delay) / task_num
        tmp_scaling_delay = copy.copy(ep_scaling_delay) / task_num
        tmp_backhaul_delay = copy.copy(ep_backhaul_delay) / task_num

        rewards_list.append(tmp_r)
        migratio_list.append(tmp_migration)
        total_download_time_list.append(tmp_total_download_time)
        trans_list.append(tmp_trans)
        waiting_time_list.append(tmp_waiting_time)
        total_download_size_list.append(tmp_total_download_size)
        computation_delay_list.append(tmp_computation_delay)
        arrange_delay_list.append(tmp_arrange_delay)
        scaling_delay_list.append(tmp_scaling_delay)
        backhaul_delay_list.append(tmp_backhaul_delay)
        
        print("##############  epoch: "+str(total_epochs)+"  reward is: "+str(tmp_r)+" #########")
        print("##############  epoch: "+str(total_epochs)+"  arrange_delay is: "+str(tmp_arrange_delay)+" #########")
        print("##############  epoch: "+str(total_epochs)+"  scaling_delay is: "+str(tmp_scaling_delay)+" #########")
        print("##############  epoch: "+str(total_epochs)+"  computation_delay is: "+str(tmp_computation_delay)+" #########")
        print("##############  epoch: "+str(total_epochs)+"  delay is: "+str(tmp_arrange_delay+tmp_scaling_delay)+" #########")

        writer.add_scalar('reward/'+str(config.TASK_NUM)+'_epoch_reward:',tmp_r,global_step=total_epochs)

        writer.add_scalar('detail/'+str(config.TASK_NUM )+'total_download_time:', tmp_total_download_time, global_step=total_epochs)
        writer.add_scalar('detail/'+str(config.TASK_NUM )+'trans:',tmp_trans,global_step=total_epochs)
        writer.add_scalar('detail/'+str(config.TASK_NUM )+'waiting_time:',tmp_waiting_time,global_step=total_epochs)
        writer.add_scalar('detail/'+str(config.TASK_NUM )+'total_download_size:',tmp_total_download_size,global_step=total_epochs)
        # writer.add_scalar('detail/migration:', migratio_list, global_step=total_epochs)

        writer.add_scalar('delay/'+str(config.TASK_NUM )+'computation_delay:', tmp_computation_delay, global_step=total_epochs)
        writer.add_scalar('delay/'+str(config.TASK_NUM )+'arrange_delay:',tmp_arrange_delay,global_step=total_epochs)
        writer.add_scalar('delay/'+str(config.TASK_NUM )+'scaling_delay:',tmp_scaling_delay,global_step=total_epochs)
        writer.add_scalar('delay/'+str(config.TASK_NUM )+'backhaul_delay:',tmp_backhaul_delay,global_step=total_epochs)
        writer.add_scalar('delay/'+str(config.TASK_NUM )+'delay:',tmp_arrange_delay+tmp_scaling_delay,global_step=total_epochs)

        total_epochs += 1


if __name__ == '__main__':
    parser = argparse.ArgumentParser("Hyperparameter Setting for PPO-discrete")
    parser.add_argument("--note", type=str, default='nothing', help="remind what is running")

    parser.add_argument("--action_dim", type=int, default=int(config.EDGE_NODE_NUM + 1), help="Action_dim")
    parser.add_argument("--resume", type=bool, default=False, help="resume saved checkpoints")

    parser.add_argument('--seed', type=int, metavar='S', default=10)
    parser.add_argument("--benchmark", type=bool, default=False, help="benchmark module has differ in each training")

    parser.add_argument("--task_num", type=int, default=int(config.TASK_NUM), help="Total task number")
    parser.add_argument("--max_train_steps", type=int, default=int(1e5), help=" Maximum number of training steps") # 2e5
    parser.add_argument("--episodes", type=int, default=int(1e3), help=" Maximum number of epochs")
    # parser.add_argument("--episodes", type=int, default=int(1), help=" Maximum number of epochs")
    # 5e2 -> 3e2
    parser.add_argument("--save_modulecheckpoint_freq", type=int, default=50, help="Save frequency")     
    parser.add_argument("--evaluate_freq", type=float, default=5e2, help="Evaluate the policy every 'evaluate_freq' steps")
    parser.add_argument("--save_freq", type=int, default=20, help="Save frequency")
    parser.add_argument("--evaluate_times", type=float, default=3, help="Evaluate times")

    parser.add_argument("--batch_size", type=int, default=10240, help="Batch size")
    # parser.add_argument("--batch_size", type=int, default=512, help="Batch size")
    parser.add_argument("--batch_size_pre", type=int, default=8, help="Batch size")
    parser.add_argument("--mini_batch_size", type=int, default=64, help="Minibatch size")
    parser.add_argument("--batch_size_pre_expert", type=int, default=5000, help="Batch size")
    
    parser.add_argument("--hidden_dim", type=int, default=64, help="The number of neurons in hidden layers of the neural network")
    parser.add_argument("--hidden_width", type=int, default=64, help="The number of neurons in hidden layers of the neural network")
    # 3e-4
    parser.add_argument("--lr", type=float, default=5e-4, help="Learning rate of actor")
    parser.add_argument("--lr_a", type=float, default=5e-4, help="Learning rate of actor")
    parser.add_argument("--lr_c", type=float, default=5e-4, help="Learning rate of critic")
    parser.add_argument("--gamma", type=float, default=0.99, help="Discount factor")
    parser.add_argument("--lamda", type=float, default=0.95, help="GAE parameter")
    parser.add_argument("--epsilon", type=float, default=0.2, help="PPO clip parameter")
    parser.add_argument("--K_epochs", type=int, default=10, help="PPO parameter")

    #需要修改的参数
    parser.add_argument("--epochs_update", type=int, default=10, help="epochs_update")
    parser.add_argument("--v_iters", type=int, default=10, help="v_iters")
    parser.add_argument("--max_norm", type=int, default=10, help="max_norm")
    parser.add_argument("--random_seed", type=int, default=10, help="random_seed")
    parser.add_argument("--target_kl_dis", type=int, default=10, help="target_kl_dis")
    parser.add_argument("--target_kl_con", type=int, default=10, help="target_kl_con")
    parser.add_argument("--init_log_std", type=int, default=10, help="init_log_std")
    parser.add_argument("--lr_std", type=int, default=10, help="lr_std")

    # 0.01->0.03
    parser.add_argument("--entropy_coef", type=float, default=0.01, help="Trick 5: policy entropy")
    parser.add_argument("--use_lr_decay", type=bool, default=True, help="Trick 6:learning rate Decay")
    parser.add_argument("--use_grad_clip", type=bool, default=True, help="Trick 7: Gradient clip")
    parser.add_argument("--use_orthogonal_init", type=bool, default=True, help="Trick 8: orthogonal initialization")
    parser.add_argument("--set_adam_eps", type=float, default=True, help="Trick 9: set Adam epsilon=1e-5")
    parser.add_argument("--use_tanh", type=float, default=True, help="Trick 10: tanh activation function")
    parser.add_argument("--use_adv_norm", type=bool, default=False, help="Trick 11: tanh activation function")

    args1 = parser.parse_args()

    env_name = ['ppo_without_trick_11']
    env_index = 0

    main(args1, env_name=env_name[env_index], number='1', ver='ppo')

 
