from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
import torch.multiprocessing as mp
from environment import create_env
from model import A3C_MLP
from train import train
from test import test
from shared_optim import SharedRMSprop, SharedAdam
import time

class Args():
    """Run configuration for the A3C training launcher.

    Plays the role of an ``argparse`` namespace with every option pinned
    to its default value. Positional arguments are accepted for
    call-compatibility but ignored.
    """

    def __init__(self, *args):
        # --- optimization hyper-parameters ---
        self.lr = 0.0001
        self.gamma = 0.99            # discount factor
        self.tau = 1.00              # GAE parameter
        self.optimizer = 'Adam'
        self.amsgrad = True
        self.l2_regular = 1e-5
        self.shared_optimizer = True
        # --- rollout / episode limits ---
        self.num_steps = 20
        self.max_episode_length = 2000
        self.training_steps = int(2E7)
        # --- environment and model ---
        self.env = 'uav-v0'
        self.model = 'MLP'
        self.stack_frames = 1
        self.seed = 1
        self.workers = 16
        # --- exploration prior ---
        self.variance = 0.4
        self.prior_decay = 0.0005
        self.use_prior = False
        # --- checkpointing, evaluation and logging ---
        self.load = False
        self.save_max = True
        self.load_model_dir = 'trained_models/'
        self.save_model_dir = 'trained_models/'
        self.log_dir = 'logs'
        self.test_episodes = 50
        self.cache_interval = 50000


if __name__ == '__main__':
    args = Args()
    # Decorrelate runs that share the same base seed.
    args.seed = args.seed * 1201
    torch.manual_seed(args.seed)
    # Rank -1: environment instantiated only to read the space dimensions.
    env = create_env(args.env, -1)

    # Globally shared model; workers update its parameters in place.
    shared_model = A3C_MLP(env.observation_space, env.action_space, args.stack_frames)
    shared_model.share_memory()

    os.makedirs(args.log_dir, exist_ok=True)

    if args.load:
        # map_location forces the checkpoint onto CPU regardless of where
        # it was saved, so loading works on GPU-less machines too.
        saved_state = torch.load('{0}{1}.dat'.format(
            args.load_model_dir, args.env), map_location=lambda storage, loc: storage)
        shared_model.load_state_dict(saved_state)

    # optimizer for policy and value optimization, shared across workers
    if args.shared_optimizer:
        if args.optimizer == 'RMSprop':
            optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
        elif args.optimizer == 'Adam':
            optimizer = SharedAdam(
                shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad)
        else:
            # Fail fast with a clear message instead of hitting a NameError
            # on optimizer.share_memory() below.
            raise ValueError('unsupported optimizer: {0}'.format(args.optimizer))
        optimizer.share_memory()
    else:
        optimizer = None

    # reinforcement learning with non-expert helper:
    # one evaluation process (rank -1) plus args.workers training processes.
    processes = []
    p = mp.Process(target=test, args=(-1, args, shared_model))
    p.start()
    processes.append(p)
    time.sleep(0.1)  # stagger process start-up
    for rank in range(0, args.workers):
        p = mp.Process(target=train, args=(rank, args, shared_model, optimizer))
        p.start()
        processes.append(p)
        time.sleep(0.1)

    for p in processes:
        time.sleep(0.1)
        p.join()
