import sys
sys.path.append('/home/llm_user/index/meta-learning/stable_meta_learning')
from stable_baselines3.common.vec_env import DummyVecEnv

import torch

from stable_meta_learning.pearl_fine import PEARL_SAC
from stable_meta_learning.envs import make_env
from stable_meta_learning.pearl_fine import MultiTaskHerReplayBuffer,MultiTaskDictReplayBuffer

# Build the multi-task training set for PEARL-SAC.
# N_TRAIN_TASKS identical factories replace the previous four copy-pasted lines;
# change this one constant to scale the number of parallel tasks.
# NOTE(review): all tasks share lateral_friction=1.0 / Dense reward, so task
# diversity is zero here — presumably a baseline run; confirm this is intended.
N_TRAIN_TASKS = 4
train_env = DummyVecEnv([
    make_env('PandaPickAndPlace-v3', lateral_friction=1.0, reward_type='Dense')
    for _ in range(N_TRAIN_TASKS)
])
# test_env = DummyVecEnv([make_env('PandaPickAndPlace-v3', mass=10.0)])

# PEARL-SAC: SAC augmented with a probabilistic task encoder producing a
# latent task variable z that conditions the policy/critics.
model = PEARL_SAC(
    encoder_lr=1e-4,           # learning rate for the task-inference encoder
    latent_dim=5,              # dimensionality of the task latent z
    encoder_hidden_dim=300,    # hidden width of the encoder network
    kl_lambda=0.1,             # weight of the KL regularizer on the latent posterior
    policy="MultiInputPolicy", # dict observation space (goal-conditioned env)
    env=train_env,
    batch_size=512,
    gamma=0.95,                # discount factor
    learning_rate=1e-4,        # actor/critic learning rate
    train_freq=64,             # env steps between gradient updates
    gradient_steps=2,          # gradient updates per training round
    tau=0.05,                  # soft target-network update coefficient
    replay_buffer_class=MultiTaskDictReplayBuffer,
    policy_kwargs=dict(
        net_arch=[512, 512, 512],
        n_critics=2,
    ),
    learning_starts=1000,      # random-action warmup steps before learning
    verbose=1,
    device=torch.device('cuda:0'),
)

# Train, checkpoint, and release the vectorized env's resources.
model.learn(total_timesteps=2_000_000, progress_bar=True)
model.save('checkpoints/PEARL_SAC_PandaPickAndPlace')

train_env.close()