from spinup import ppo_pytorch as ppo
from spinup import ddpg_pytorch as ddpg
from spinup.algos.pytorch.td3.td3 import td3 as td3_pytorch
from spinup.utils.test_policy import load_policy_and_env, run_policy
import torch
import gym

# Mode flag: nonzero trains a fresh policy; zero loads a saved one and
# visualizes it with run_policy.
TRAIN = 0

# Spinning Up algorithms expect the environment as a zero-argument factory
# callable, not an instantiated env.
def env():
    """Build the single-arm UR5 controller environment."""
    return gym.make('ur5controller-v2')
# env = lambda : gym.make('ur5controllerdual-v2')

if TRAIN:
    # Earlier experiment configurations, kept for reference. Each pairs a
    # network spec (ac_kwargs) with the log directory that stored its run.
    # ac_kwargs = dict(hidden_sizes=[64,64], activation=torch.nn.ReLU)
    # logger_kwargs = dict(output_dir='log_0.5_0_-0.5_UR5_1', exp_name='UR5')

    # ac_kwargs = dict(hidden_sizes=[64,64,64], activation=torch.nn.ReLU)
    # logger_kwargs = dict(output_dir='log_0.5_0_0.5_UR5_1', exp_name='UR5')

    # ac_kwargs = dict(hidden_sizes=[64,64,64], activation=torch.nn.ReLU)
    # logger_kwargs = dict(output_dir='log_0.5_0_0.7_UR5_1', exp_name='UR5')

    # Active configuration: 3x64 ReLU MLP actor-critic; the output_dir name
    # presumably encodes the task's target coordinates — TODO confirm.
    ac_kwargs = dict(hidden_sizes=[64,64,64], activation=torch.nn.ReLU)
    logger_kwargs = dict(output_dir='log_0.5_0.3_-0.4_UR5_1', exp_name='UR5')

    # PPO training run: 100 epochs x 5000 steps, checkpoint every 10 epochs.
    ppo(env, ac_kwargs=ac_kwargs, logger_kwargs=logger_kwargs, seed=0,
        steps_per_epoch=5000, epochs=100, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,
        vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=500,
        target_kl=0.05, save_freq=10)

    # Alternative off-policy runs (disabled). NOTE(review): the numeric
    # hyperparameters below were garbled by a search-and-replace; restored to
    # the Spinning Up defaults (pi_lr=1e-3, q_lr=1e-3, target_noise=0.2,
    # policy_delay=2) — confirm against the original experiment notes.
    # ddpg(env, ac_kwargs=ac_kwargs, logger_kwargs=logger_kwargs,
    #        steps_per_epoch=400, epochs=10000, replay_size=int(1e6), gamma=0.9,
    #        polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100, start_steps=1900000,
    #        update_after=100, update_every=50, act_noise=0.1, num_test_episodes=10,
    #        max_ep_len=100, save_freq=1)

    # td3_pytorch(env, ac_kwargs=ac_kwargs, logger_kwargs=logger_kwargs, seed=0,
    #     steps_per_epoch=4000, epochs=100, replay_size=int(1e6), gamma=0.99,
    #     polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100, start_steps=10000,
    #     update_after=1000, update_every=50, act_noise=0.1, target_noise=0.2,
    #     noise_clip=0.5, policy_delay=2, num_test_episodes=10, max_ep_len=1000, save_freq=1)

else:
    # Evaluation: load the trained policy from the active log directory and
    # roll it out in a fresh environment instance.
    _, get_action = load_policy_and_env('log_0.5_0.3_-0.4_UR5_1')
    # _, get_action = load_policy_and_env('logDualUR5')
    # _, get_action1, get_action2 = load_policy_and_env('logUR5Dual')
    env_test = gym.make('ur5controller-v2')
    # env_test = gym.make('ur5controllerdual-v2')
    run_policy(env_test, get_action)
    # run_policy(env_test, get_action1, get_action2)

#################################################################
# from stable_baselines import PPO1
# from stable_baselines.common.evaluation import evaluate_policy
# import gym
# import time
#
# env = gym.make('ur5controller-v2')
# TRAIN = 0
#
# if TRAIN:
#     model = PPO1('MlpPolicy', env, verbose=1)
#     model.learn(total_timesteps=int(5e6))
#     model.save("ppo-ur5")
#     del model
#
# else:
#     model = PPO1.load("ppo-ur5", env)
#     mean_reward, std_reward = evaluate_policy(model, model.get_env(), n_eval_episodes=10)
#     obs = env.reset()
#     for i in range(1000):
#         action, _states = model.predict(obs)
#         obs, rewards, done, info = env.step(action)
#         env.render()