'''
The code that matters in this project:
main.py
from actor import Actor
from model import Net
from shared_adam import SharedAdam
The remaining modules are mostly legacy and unused (do not delete them yet).
'''

# Main entry script: defines the hyper-parameters and parses them with argparse.
import torch
import argparse
import os, logging
import torch.multiprocessing as mp
from shared_adam import SharedAdam
from actor import Actor    
from model import Net

logging.basicConfig(level = logging.INFO,format = '%(name)s - %(message)s')
logger = logging.getLogger("mainModule")
# logger = logging.getLogger(__name__)
# logger.setLevel(level = logging.DEBUG)

if __name__ == '__main__':
    # 'spawn' is required so each actor process gets a clean CUDA/torch state.
    mp.set_start_method('spawn')

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--actors", type=int, default=3,
                        help="the number of actor processes to start, default is 3")
    parser.add_argument("--seed", type=int, default=123,
                        help="the seed of random, default is 123")
    parser.add_argument("--action_type_size", type=int, default=4,
                        help="the size of action type, default is 4")
    parser.add_argument("--action_param_size", type=int, default=6,
                        help="the size of action param, default is 6")
    # NOTE(review): marked "no longer used" in the original source; kept so
    # existing launch scripts that still pass it do not break.
    parser.add_argument("--target_replace_iter", type=int, default=100,
                        help="Number of steps to update the dqn, default is 100")
    parser.add_argument("--action_params", type=int, default=6,
                        help="the number of action params, default is 6")
    parser.add_argument('--length', type=int, default=50,
                        help='Number of steps to run the agent')
    parser.add_argument('--total_steps', type=int, default=80,
                        help='Number of steps to run the agent')

    parser.add_argument('--batch_size', type=int, default=2,
                        help='batch_size, default is 2')
    parser.add_argument("--lr", type=float, default=0.0001,
                        help="Learning rate, default is 0.0001")

    parser.add_argument("--decay", type=float, default=.99,
                        help="RMSProp optimizer decay, default is .99")
    # Either momentum or RMSProp decay is picked as the optimization scheme
    # for the network (per the original author's note).
    parser.add_argument("--momentum", type=float, default=0,
                        help="RMSProp momentum, default is 0")

    parser.add_argument("--epsilon", type=float, default=.1,
                        help="RMSProp epsilon, default is 0.1")

    parser.add_argument('--save_path', type=str, default="./checkpoint.pt",
                        help='Set the path to save trained model parameters')
    parser.add_argument('--load_path', type=str, default="./checkpoint.pt",
                        help='Set the path to load previously trained model parameters')

    parser.add_argument('--HFODir', type=str, default="~/HFO",
                        help='Set the path of Hfo dir')

    parser.add_argument('--MAX_EP', type=int, default=2000,
                        help='Set the num of ep')
    # BUG FIX: was type=int with a float default; "--GAMMA 0.99" on the
    # command line crashed with ValueError because int("0.99") is invalid.
    parser.add_argument('--GAMMA', type=float, default=0.99,
                        help='decay coefficient in value')

    parser.add_argument('--UPDATE_GLOBAL_ITER', type=int, default=10,
                        help='number of cycles in an episode')

    args = parser.parse_args()

    gnet = Net()        # global network shared by all actors
    # BUG FIX: resume from --load_path (it was declared but never used; the
    # code previously loaded from --save_path). Defaults are identical, so
    # default behavior is unchanged.
    if os.path.exists(args.load_path):
        gnet.load_state_dict(torch.load(args.load_path))
    gnet.share_memory()             # share the global parameters in multiprocessing
    opt = SharedAdam(gnet.parameters(), lr=args.lr)      # global optimizer

    # global_ep counts episodes, global_ep_r tracks the moving episode reward,
    # res_queue collects per-episode rewards from the actor processes.
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()

    # See torch.optim.Adam and torch.multiprocessing (share_memory(), start(),
    # join()) for details:
    # https://pytorch-cn.readthedocs.io/zh/latest/package_references/torch-multiprocessing/

    # Parallel training: one Actor process per worker.
    actors = [Actor(gnet, opt, global_ep, global_ep_r, res_queue, i, args) for i in range(args.actors)]
    for a in actors:
        a.start()

    # Drain res_queue into a plain list so the rewards can be plotted
    # (a None sentinel signals that training is finished).
    res = []
    while True:
        r = res_queue.get()
        if r is None:
            break
        res.append(r)
    for a in actors:
        a.join()
    logger.info("end test logging")

    # Persist the trained global network.
    torch.save(gnet.state_dict(), args.save_path)

    # Imported here on purpose: plotting is only needed after training ends.
    import matplotlib.pyplot as plt
    plt.plot(res)
    plt.ylabel('Moving average ep reward')
    plt.xlabel('Step')
    plt.savefig("reward.png")
    plt.close()
