import random
import multiprocessing
import time

import gymnasium as gym
import numpy as np
import torch

import sac_model
import tools

# Environment: MuJoCo Humanoid with on-screen rendering for visual evaluation.
env_name = 'Humanoid-v5'
env = gym.make(env_name,render_mode='human')


# Dimensions taken from the environment's observation/action spaces.
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
# Max absolute action magnitude — presumably used by the actor to scale/clip
# its tanh output; verify against sac_model. NOTE: Humanoid-v5's native action
# range is [-0.4, 0.4], which matches this value.
action_bound = 0.4
# Fix all RNG seeds for reproducibility (Python, NumPy, PyTorch).
random.seed(0)
np.random.seed(0)

torch.manual_seed(0)

# Learning rates for the actor, the twin critics, and the entropy temperature.
actor_lr = 1e-4
critic_lr = 1e-3
alpha_lr = 1e-4

hidden_dim = 256
gamma = 0.99  # discount factor
tau = 0.005  # soft-update coefficient for the target critics
buffer_size = 7000   # replay buffer capacity
minimal_size = 1000  # minimum transitions collected before updates start
batch_size = 96
# SAC heuristic: target entropy = -|A| (negative action dimensionality).
target_entropy = -env.action_space.shape[0]
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

replay_buffer = tools.ReplayBuffer(buffer_size)

# Assemble the SAC agent from the project-local model module.
agent = sac_model.SAC_Agent(state_dim=state_dim, hidden_dim=hidden_dim, action_dim=action_dim, action_bound=action_bound,
                            actor_lr=actor_lr, critic_lr=critic_lr, alpha_lr=alpha_lr,target_entropy=target_entropy,
                            tau=tau,gamma=gamma, device=device)

# agent2 = sac_model.SAC_Agent_PER(state_dim=state_dim, hidden_dim=hidden_dim, action_dim=action_dim, action_bound=action_bound,
#                             actor_lr=actor_lr, critic_lr=critic_lr, alpha_lr=alpha_lr,target_entropy=target_entropy,
#                             tau=tau,gamma=gamma, device=device,buffer_size=buffer_size,batch_size=batch_size)
#
# # Sync agent's parameters into agent2
# agent2.actor.load_state_dict(agent.actor.state_dict())
# agent2.critic_1.load_state_dict(agent.critic_1.state_dict())
# agent2.critic_2.load_state_dict(agent.critic_2.state_dict())
# agent2.target_critic_1.load_state_dict(agent.target_critic_1.state_dict())
# agent2.target_critic_2.load_state_dict(agent.target_critic_2.state_dict())
# agent2.log_alpha.data = agent.log_alpha.data.clone()  # sync the alpha (temperature) parameter
#
# # =======================================================================================================================
# num_steps = 900000
#
# def train1():
#     return_list_SAC = tools.train_agent(env, agent, num_steps, replay_buffer, minimal_size, batch_size)
#     tools.save_results_to_file(return_list_SAC, 'result/return_list_SAC.npy')
#
# def train2():
#     return_list_SAC_PER = tools.train_agent_PER(env=env,agent=agent2,num_steps=num_steps,minimal_size=minimal_size)
#     tools.save_results_to_file(return_list_SAC_PER, 'result/return_list_SAC_PER.npy')
#
#
# def plot_results_from_files():
#     y_values1 = tools.load_results_from_file('result/return_list_SAC.npy')
#     y_values2 = tools.load_results_from_file('result/return_list_SAC_PER.npy')
#     tools.plot_lines(y_values1=y_values1, y_values2=y_values2)

if __name__ == "__main__":
    # Evaluation-only rollout: load the final trained SAC parameters and
    # play one episode with on-screen rendering (render_mode='human' is
    # set at env creation above). No training / replay-buffer writes here.
    agent.load_model(model_path='result/SAC最终训练结果.parameter')

    # Gymnasium reset() returns (observation, info); keep only the observation.
    state, _ = env.reset()
    state = state.tolist()
    done = False

    while not done:
        action = agent.take_action(state)
        # Gymnasium step() returns (obs, reward, terminated, truncated, info).
        next_state, reward, terminated, truncated, _ = env.step(action)
        # BUG FIX: `done` was never updated (the assignment was commented
        # out), so this loop could never exit. Either termination flag
        # ends the episode; `result[2]` alone would miss truncation
        # (e.g. the time limit).
        done = terminated or truncated
        state = next_state.tolist()

    env.close()

    # multiprocessing.set_start_method('spawn')
    #
    # p1 = multiprocessing.Process(target=train1)
    # p2 = multiprocessing.Process(target=train2)
    #
    # p1.start()
    # p2.start()
    #
    # while p1.is_alive() or p2.is_alive():
    #     time.sleep(2)
    #
    # # Call the plotting function
    # plot_results_from_files()