# train.py
import torch
import time
from ppo import PPO
from parallel_env import ParallelEnv
from environment import rm_simple_simulator
import os
import imageio
import numpy as np
import cv2


def _record_test_episodes(ppo, update_count, n_episodes=3, max_test_steps=1000):
    """Roll out the current policy in a fresh single-env simulator and save
    each test episode as an animated GIF under sim_gif/.

    Args:
        ppo: PPO agent whose ``policy.act`` is queried for actions.
        update_count: current update index, used in the GIF file names.
        n_episodes: number of independent test episodes to record.
        max_test_steps: hard per-episode cap to prevent infinite loops
            when the environment never signals done.
    """
    for test_idx in range(n_episodes):
        test_env = rm_simple_simulator(render_mode="rgb_array")
        try:
            obs, _ = test_env.reset()
            frames = []
            done = False
            step_count = 0
            while not done and step_count < max_test_steps:
                actions, _, _ = ppo.policy.act(obs["visions"], obs["infos"])
                actions_np = (
                    actions.cpu().detach().numpy().flatten()
                )  # (18,) for single env
                # NOTE(review): the 4th return value is presumably the
                # `truncated` flag and is discarded, so only max_test_steps
                # bounds truncated episodes — confirm against the env API.
                obs, rewards, dones, _, _ = test_env.step(actions_np)
                frame = test_env.render(mode="rgb_array")
                if frame is not None:
                    # switch BGR to RGB (simulator renders BGR; imageio wants RGB)
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frames.append(frame)
                done = dones.any()
                step_count += 1
            # Save GIF
            gif_path = f"sim_gif/test_{update_count}_{test_idx}.gif"
            if frames:
                imageio.mimsave(gif_path, frames, fps=10)
                print(f"Saved test GIF: {gif_path} ({len(frames)} frames)")
        finally:
            # Close even when rollout/rendering raises, so the simulator's
            # resources are not leaked.
            test_env.close()


def main():
    """Train a PPO agent on parallel environments, periodically saving
    checkpoints and recording test-episode GIFs."""
    time_start_process = time.time()
    print(f"[{time_start_process:.2f}]Initializing resources...")
    n_envs = 40
    env = ParallelEnv(n_envs=n_envs)

    ppo = PPO(env, lr=3e-4)

    model_path = "model_checkpoint.pth"
    # Resume only if a checkpoint exists; an unconditional load would
    # crash on the very first run before any checkpoint has been saved.
    if os.path.exists(model_path):
        ppo.load(model_path)

    n_steps_per_update = 2048
    max_timesteps = 10000000
    test_interval = 10  # record test GIFs every N updates
    save_interval = 10  # checkpoint every N updates

    timestep = 0
    update_count = 0
    os.makedirs("sim_gif", exist_ok=True)
    time_start_training = time.time()

    print(f"[{time_start_training:.2f}]Starting training...")
    print(
        f"[{time_start_training:.2f}]Time taken for process initialization: {time_start_training - time_start_process:.2f} seconds"
    )
    try:
        while timestep < max_timesteps:
            time_start_collect = time.time()
            ppo.collect_rollouts(n_steps_per_update)
            time_end_collect = time.time()
            ppo.update()
            time_end_update = time.time()
            timestep += n_steps_per_update
            update_count += 1

            # Logging: extrapolate remaining wall time from this update's cost.
            expected_rest_time = (max_timesteps // n_steps_per_update - update_count) * (
                time_end_update - time_start_collect
            )
            print(
                f"[{time_end_update:.2f}]Epoch : {update_count}/{max_timesteps // n_steps_per_update}, Cost:{time_end_update - time_start_process:.2f}s, Expected rest time: {expected_rest_time / 60:.2f}min, \n"
                f"collect time: {time_end_collect - time_start_collect:.2f}s, update time: {time_end_update - time_end_collect:.2f}s"
            )

            if update_count % save_interval == 0:
                ppo.save(model_path)

            if update_count % test_interval == 0:
                print(f"[{time_end_update:.2f}]Running tests at update {update_count}")
                _record_test_episodes(ppo, update_count)
    finally:
        # Release the worker environments and the TensorBoard writer even
        # when training is interrupted or an update raises.
        env.close()
        ppo.writer.close()


if __name__ == "__main__":
    main()
