import numpy as np
from SAC_agent import SAC_Agent
from my_utils import *
from mujoco_py.mujoco_Pendulum import MujocoPendulum
from gymnasium.wrappers import TimeLimit


def main():
    """Train a SAC agent on the Mujoco Pendulum environment.

    Builds the training configuration, runs the interact/update loop for
    ``config.break_steps`` environment steps, periodically evaluates the
    policy (with human rendering) and checkpoints the best-scoring agent.
    """
    # env args
    config = Config()
    train_env = TimeLimit(MujocoPendulum(), max_episode_steps=200)
    config.state_dim = train_env.observation_space.shape[0]
    config.action_dim = train_env.action_space.shape[0]
    # evaluate_policy() receives config.max_action (L73 of the original never
    # set it anywhere in this file) to rescale the agent's a∈[-1,1] output;
    # derive it from the env so the scale is guaranteed to match.
    config.max_action = float(train_env.action_space.high[0])

    # train_args
    config.tau = 0.005                 # soft target-network update rate
    config.break_steps = int(5e6)      # total env steps before training stops
    config.random_steps = int(1e4)     # pure exploration before learning starts
    config.update_gap = int(50)
    config.update_every = int(50)
    config.buffer_size = int(1e6)

    # eval
    config.eval_gap = int(2e3)
    config.eval_times = int(2)

    # algorithm args
    config.gamma = 0.97
    config.net_dims = [64, 32]

    ## actor
    config.actor_lr = 5e-3
    config.actor_batch_size = int(128)

    ## critic
    config.critic_lr = 5e-3
    config.critic_batch_size = int(128)

    ### SAC args
    config.SAC_alpha = 0.12
    config.SAC_adaptive_alpha = True   # auto-tune the entropy temperature
    config.SAC_alpha_lr = 5e-3

    # agent
    agent = SAC_Agent(config)

    # train loop
    total_steps = 0
    best_score = -np.inf
    s, _ = train_env.reset()
    while total_steps < config.break_steps:
        '''Interact & train'''
        if total_steps < config.random_steps:
            a = train_env.action_space.sample()  # act∈[-max,max]
        else:
            a = agent.get_action(s, eval=False)  # a∈[-1,1]
        s_next, r, dw, tr, _ = train_env.step(a)
        # Store only `dw` (true termination): a TimeLimit truncation must not
        # be treated as terminal when bootstrapping the TD target.
        agent.replay_buffer.add_one_step(s, a, r, s_next, dw)
        s = s_next
        total_steps += 1

        # BUG FIX: restart the episode once it terminates (dw) or is truncated
        # by the TimeLimit wrapper (tr). The original never reset, so it kept
        # stepping a finished episode — invalid under the gymnasium API.
        if dw or tr:
            s, _ = train_env.reset()

        '''update if its time'''
        # train 50 times every 50 steps rather than 1 training per step. Better!
        if total_steps >= config.random_steps and total_steps % config.update_gap == 0:
            for _ in range(config.update_every):
                agent.update_agent()

        '''log & save'''
        if total_steps % config.eval_gap == 0:
            eval_env = TimeLimit(MujocoPendulum(render_mode="human"), max_episode_steps=200)
            score = evaluate_policy(eval_env, agent, config.max_action, turns=config.eval_times)
            print(
                f"| eval at steps: 「{total_steps / 100:.0f}百  / {config.break_steps / 100:.0f}百」  Reward={score:.3f}")
            if score > best_score:
                best_score = score
                if score > -20:  # only checkpoint reasonably good policies
                    agent.save_agent(file_name=f"{total_steps}__Reward={score:.3f}")
            eval_env.close()

    train_env.close()



if __name__ == "__main__":
    main()
