import numpy as np

from SAC import SoftActorCritic
import time
import gym
import torch
from replay_buffer import ReplayBuffer

# Saved SAC policy checkpoints at three skill levels (by training epoch).
# Each key doubles as the filename of the offline dataset generated from
# that policy. NOTE(review): paths are relative to the working directory —
# confirm the script is launched from the project root.
model_dict = {
    "poor": "./model_save/epoch_390.pth",
    "general": "./model_save/epoch_410.pth",
    "expert": "./model_save/epoch_720.pth"
}

env = gym.make("BipedalWalker-v3")
# Aggregate buffer: meant to hold the transitions of all three policies combined
# (3 policies x up to 1e6 each, hence the 3e6 capacity).
all_data = ReplayBuffer(state_dim=env.observation_space.shape[0], action_dim=env.action_space.shape[0],
                        max_size=int(3e6))
sac = SoftActorCritic(env)
for data_name, model_path in model_dict.items():
    print(data_name)
    sac.load_model(model_path)
    # Per-policy buffer, saved under the policy's skill-level name.
    data = ReplayBuffer(state_dim=env.observation_space.shape[0], action_dim=env.action_space.shape[0],
                        max_size=int(1e6))
    state, _ = env.reset()
    for _ in range(1000):
        a, _ = sac.choose_action(torch.tensor(state))
        # Gymnasium-style 5-tuple: keep `terminated` and `truncated` separate
        # instead of discarding the truncation flag.
        new_state, reward, terminated, truncated, _ = env.step(a)
        # Store only true termination as `done`: a time-limit truncation must
        # not be treated as an absorbing state by the offline learner.
        data.push(state, a, new_state, [reward], [terminated])
        # BUG FIX: transitions were never pushed into `all_data`, so the
        # "all_data" file was saved empty; mirror every push into the
        # aggregate buffer.
        all_data.push(state, a, new_state, [reward], [terminated])
        state = new_state
        # Reset on either end-of-episode condition so we never step a
        # finished/truncated episode.
        if terminated or truncated:
            state, _ = env.reset()
    data.save(data_name)
all_data.save("all_data")
