import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback
import torch
import time
from stable_baselines3.common.vec_env import SubprocVecEnv

class TimeCallback(BaseCallback):
    """Callback that reports wall-clock elapsed time and training progress.

    Prints the elapsed seconds and current/total timestep count every
    ``print_freq`` environment steps.
    """

    def __init__(self, total_timesteps, verbose=0, print_freq=1):
        """
        :param total_timesteps: total timesteps the run will train for
            (used only for the progress display).
        :param verbose: verbosity level forwarded to ``BaseCallback``.
        :param print_freq: emit a progress line every ``print_freq`` steps.
            The default of 1 keeps the original print-every-step behavior;
            pass a larger value to avoid flooding stdout on long runs.
        """
        super().__init__(verbose)
        self.total_timesteps = total_timesteps
        self.print_freq = max(1, int(print_freq))
        self.start_time = None

    def _on_training_start(self):
        # Record the wall-clock start so _on_step can compute elapsed time.
        self.start_time = time.time()

    def _on_step(self):
        # Guard: initialize lazily in case _on_step is ever invoked before
        # _on_training_start (avoids TypeError on None arithmetic).
        if self.start_time is None:
            self.start_time = time.time()
        if self.n_calls % self.print_freq == 0:
            elapsed_time = time.time() - self.start_time
            print(f"Elapsed time: {elapsed_time:.2f}s, "
                  f"Timesteps: {self.num_timesteps}/{self.total_timesteps}")
        return True

class LoggingCallback(BaseCallback):
    """Callback that prints per-episode statistics (reward and length).

    Relies on the env's Monitor wrapper populating an ``'episode'`` entry
    in the step ``infos`` dict when an episode terminates.
    """

    def __init__(self, verbose=0):
        super().__init__(verbose)

    def _on_step(self):
        # Log reward/length whenever any env in the vec-env finishes an
        # episode; the 'episode' key is only present on termination.
        for info in (self.locals.get('infos') or ()):
            if 'episode' in info:  # idiomatic membership test, no .keys()
                print(f"Episode reward: {info['episode']['r']}, length: {info['episode']['l']}")
        return True

def main(num_envs=20, total_timesteps=500000,
         save_path="/home/sh/catkin_ws/src/ymbot_e_control/policy/ppo_humanoid"):
    """Train a PPO agent on HumanoidStandup-v5 with parallel environments.

    :param num_envs: number of parallel subprocess environments.
    :param total_timesteps: total environment steps to train for.
    :param save_path: path (without extension) where the model is saved.
    """
    # Each lambda constructs an independent env in its own subprocess.
    env = SubprocVecEnv(
        [lambda: gym.make('HumanoidStandup-v5', ctrl_cost_weight=0.1)
         for _ in range(num_envs)]
    )
    try:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model = PPO('MlpPolicy', env, verbose=1, device=device)

        logging_callback = LoggingCallback()
        time_callback = TimeCallback(total_timesteps=total_timesteps)

        # Train the model without rendering.
        model.learn(total_timesteps=total_timesteps,
                    callback=[time_callback, logging_callback])

        # Persist the trained policy.
        model.save(save_path)
    finally:
        # Always terminate the subprocess envs, even if training raises;
        # otherwise the worker processes leak.
        env.close()

if __name__ == '__main__':
    main()
