import random
import gym
import numpy as np
import torch
import rl_utils
import sys
# Tested with: pip install gym==0.25.2
# (older releases such as gym==0.10.5 lack env.reset(seed=...) used below)

# Select the compute device and make every RNG stream reproducible.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

seedseed = 0
random.seed(seedseed)
np.random.seed(seedseed)
torch.manual_seed(seedseed)

# --- Shared hyper-parameters (algorithm branches below may override some) ---
actor_lr = 3e-4           # actor (policy) learning rate
critic_lr = 3e-3          # critic (value) learning rate
num_episodes = 200        # number of training episodes
hidden_dim = 64           # hidden-layer width of the networks
discount_factor = 0.98    # reward discount factor (gamma)
para_soft_update = 0.005  # soft (Polyak) target-update coefficient
buffer_size = 10000       # replay-buffer capacity
minimal_size = 1000       # transitions collected before updates begin
batch_size = 64           # minibatch size per gradient update
expl_noise_sigma = 0.01   # std of Gaussian exploration noise
replay_buffer = rl_utils.ReplayBuffer(buffer_size)

# --- Environment setup: continuous-control Pendulum ---
print(f'gym = {gym.__version__}')
env_name = 'Pendulum-v1'
env = gym.make(env_name)
env.reset(seed=seedseed)  # seed the environment for reproducibility

# Read dimensions and action bounds from the env's spaces.
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
action_low = env.action_space.low[0]    # minimum action value
action_high = env.action_space.high[0]  # maximum action value

print(f'state_dim = {state_dim}')
print(f'action_dim = {action_dim}')
print(f'action_low = {action_low}')
print(f'action_high = {action_high}')

# Which algorithm to run; drives the dispatch below.
alg_name = 'TD3'
print(f'alg_name = {alg_name}')

if alg_name == 'DDPG' or alg_name == 'TD3':
    # DDPG and TD3 share one implementation; alg_name selects the TD3-specific
    # behavior inside the class.
    from off_policy.alg_DDPG_TD3_Continuous import DDPG_TD3
    agent = DDPG_TD3(alg_name, state_dim, hidden_dim, action_dim, expl_noise_sigma, actor_lr, critic_lr, para_soft_update, discount_factor, device)
elif alg_name == 'SAC':
    # SAC-specific hyper-parameter overrides.
    num_episodes = 100
    hidden_dim = 128
    alpha_lr = 3e-4  # learning rate for the entropy temperature
    buffer_size = 100000
    # Fix: the replay buffer was already built with the default capacity
    # (10000) before this override, so the larger size never took effect —
    # rebuild the buffer so the new capacity is actually used.
    replay_buffer = rl_utils.ReplayBuffer(buffer_size)
    target_entropy = -env.action_space.shape[0]  # common heuristic: -|A|
    from off_policy.alg_SAC_Continuous import SAC
    agent = SAC(state_dim, hidden_dim, action_dim, actor_lr, critic_lr, alpha_lr, target_entropy, para_soft_update, discount_factor, device)
elif alg_name == 'DuelingDoubleDQN':
    # DQN-family overrides. DQN needs discrete actions, so the continuous
    # action space is discretized.
    lr = 1e-2
    hidden_dim = 128
    action_epsilon = 0.01  # epsilon-greedy exploration rate
    target_update = 50     # target-network update period
    buffer_size = 5000
    # Fix: rebuild the replay buffer so the overridden capacity takes effect
    # (it was created earlier with the default size).
    replay_buffer = rl_utils.ReplayBuffer(buffer_size)
    action_dim = 11  # split the continuous action into 11 discrete actions
    from off_policy.alg_D3QN import DQN
    agent = DQN(state_dim, hidden_dim, action_dim, lr, discount_factor, action_epsilon, target_update, device, dqn_type=alg_name)
    print('Training!!!!')
    return_list, max_q_value_list = rl_utils.train_DQN(agent, env, num_episodes, replay_buffer, minimal_size, batch_size)
    rl_utils.plot_results(return_list, env_name, alg_name, string_train_test='Training', moving_average_weight=5)
    print(f'len(return_list) = {len(return_list)}')
    print(f'len(max_q_value_list) = {len(max_q_value_list)}')
    from off_policy.alg_D3QN import plot_Qvalue
    plot_Qvalue(max_q_value_list, env_name, alg_name, string_train_test='Training')
    # The DQN path trains/plots here and exits; the continuous-action
    # train/test/render code below does not apply to it.
    sys.exit()



# --- Train, evaluate, and finally render the continuous-control agent ---
print('Training!!!!')
train_returns = rl_utils.train_off_policy_agent(env, agent, num_episodes, replay_buffer, minimal_size, batch_size, action_low, action_high)
rl_utils.plot_results(train_returns, env_name, alg_name, string_train_test='Training', moving_average_weight=9)

print('Testing!!!!')
test_returns = rl_utils.test_agent(env, agent, num_episodes=50)
rl_utils.plot_results(test_returns, env_name, alg_name, string_train_test='Testing', moving_average_weight=3)

print('Rendering!!!!')
rl_utils.test_agent_render(env, agent)