import torch
import numpy as np
from PPO import PPO
from arguments import get_args

# Parsed command-line / config arguments, shared by every function below.
arg = get_args()


def load_model(run_num_pretrained=17, action_std=0.001):
    """Build a PPO agent from the parsed arguments and load a checkpoint.

    Args:
        run_num_pretrained: Checkpoint run number used in the file name.
            Defaults to 17, the value previously hard-coded here.
        action_std: Fixed action standard deviation for the loaded policy.
            Defaults to 0.001 (near-deterministic actions for evaluation).

    Returns:
        A ``PPO`` agent with weights loaded from
        ``PPO_{env_name}_1_{run_num_pretrained}.pth``.
    """
    env_name = arg.env_name
    lr_actor = arg.lr_actor    # learning rate for actor
    lr_critic = arg.lr_critic  # learning rate for critic

    state_dim = arg.state_dim
    action_dim = arg.action_dim
    has_continuous_action_space = True

    K_epochs = arg.K_epochs  # update policy for K epochs per PPO update
    eps_clip = arg.eps_clip  # clip parameter for PPO
    gamma = arg.gamma        # discount factor

    # Initialize a PPO agent with the evaluation-time action std.
    ppo_agent = PPO(state_dim, action_dim, lr_actor, lr_critic, gamma, K_epochs, eps_clip, has_continuous_action_space,
                    action_std)
    # Middle field was hard-coded to 1 in the original naming scheme
    # (presumably a seed/run id — confirm against the training script).
    checkpoint_path = "PPO_{}_{}_{}.pth".format(env_name, 1, run_num_pretrained)
    print("loading network from : " + checkpoint_path)
    ppo_agent.load(checkpoint_path)
    return ppo_agent


def get_action(ppo_agent, state):
    """Mask one observation segment to zero, then query the policy.

    NOTE(review): the slice operation below mutates the caller's ``state``
    array in place — presumably intentional; confirm with callers.
    """
    seg_start = 2 * arg.num1
    seg_end = seg_start + 2 * arg.num2
    # In-place multiply (kept as *= 0 to preserve the original's exact
    # semantics, e.g. for NaN entries) zeroes the masked segment.
    state[seg_start:seg_end] *= 0
    return ppo_agent.select_action(state)


if __name__ == '__main__':
    # Load the pretrained agent and query it once on a fixed example
    # observation (16 entries; a random alternative would be
    # np.random.random(2 * (arg.num1 + arg.num2) + 4)).
    agent = load_model()
    demo_state = np.array([
        0.97726356, -2.8658566, 14.74572226, -2.64242902, 5.14834309,
        3.15994763, -13.5849256, 0.37269799, -14.09126399, 6.23346449,
        -5.56272581, -3.45009084, 5.73003424, 4.93205938, 0.02995095,
        -0.08047966,
    ])
    print(get_action(agent, demo_state))