import os

from SAC import SoftActorCritic
import time
import gym
from torch.utils.tensorboard import SummaryWriter

# Directories for TensorBoard logs and model checkpoints; create them up
# front so SummaryWriter and save_model never hit a missing path.
log_dir = './runs'
os.makedirs(log_dir, exist_ok=True)

model_save_dir = "./model_save"
os.makedirs(model_save_dir, exist_ok=True)

summary_writer = SummaryWriter(log_dir=log_dir)
env = gym.make("BipedalWalker-v3")
sac = SoftActorCritic(env)

epochs = 5000       # number of training episodes
steps = 3000        # hard cap on steps per episode (safety against non-terminating episodes)
warmup_size = 2000  # minimum replay-buffer size before learning starts
all_reward = []     # per-episode reward history

for epoch in range(epochs):
    start_time = time.time()
    state, _ = env.reset()
    step = 0
    episode_rewards = 0
    done = False
    while not done:
        action, _ = sac.choose_action(state)
        # Gymnasium-style 5-tuple step API: treat truncation the same as
        # termination so time-limited episodes end the loop properly.
        new_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated
        if reward <= -100:
            # The walker fell: clip the large penalty to -1 and mark the
            # transition terminal so the critic does not bootstrap past it.
            reward = -1
            sac.buffer_memory.append([state, action, [reward], new_state, [True]])
        else:
            # Store the real terminal flag (the original always stored False
            # here, which mislabeled genuine episode ends as non-terminal).
            sac.buffer_memory.append([state, action, [reward], new_state, [done]])
        state = new_state
        episode_rewards += reward
        step += 1
        if step > steps:
            break

        # Learn only once the replay buffer holds enough samples.
        if len(sac.buffer_memory) > warmup_size:
            sac.learn()

    if epoch % 10 == 0:
        sac.save_model(epoch, model_save_dir)
    end_time = time.time()
    all_reward.append(episode_rewards)
    summary_writer.add_scalar('episode_rewards', episode_rewards, epoch)
    print(f"Epoch/Epochs: {epoch + 1}/{epochs}, Reward: {episode_rewards}, "
          f"Spent_Time: {end_time - start_time}")

env.close()
summary_writer.close()
