import time

import numpy as np
from gym.wrappers.monitoring.video_recorder import ImageEncoder
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.vec_env import DummyVecEnv


def evaluate_policy(
    env: DummyVecEnv, policy: BasePolicy, video_path: str, min_eval_steps=3000
):
    """Roll out `policy` on `env` in evaluation mode, record a video of the
    rollout, and aggregate per-episode statistics.

    Steps the vectorized env for at least `min_eval_steps` steps, then keeps
    going until every sub-env has finished at least one episode.

    Args:
        env: vectorized environment; sub-envs must expose the `eval_mode`,
            `action_*`, and info keys (`episode_stat`, `episode_event`,
            `timeout`) used below.
        policy: policy whose `forward` returns
            (actions, values, log_probs, mu, sigma, _).
        video_path: output path for the rendered mp4.
        min_eval_steps: minimum number of vectorized steps to run.

    Returns:
        (avg_ep_stat, ep_events): averaged episode statistics (dict) and a
        dict mapping "venv_{i}" to that sub-env's list of episode events.
    """
    policy = policy.eval()
    start_time = time.time()

    # Switch every sub-env into evaluation mode before resetting.
    for env_idx in range(env.num_envs):
        env.set_attr("eval_mode", True, indices=env_idx)
    obs = env.reset()

    frames = []
    ep_stat_buffer = []
    ep_events = {f"venv_{env_idx}": [] for env_idx in range(env.num_envs)}

    step_count = 0
    timeout_count = 0
    env_done = np.zeros(env.num_envs, dtype=bool)

    # Continue until the step budget is met AND each sub-env has completed
    # at least one episode.
    while step_count < min_eval_steps or not np.all(env_done):
        actions, values, log_probs, mu, sigma, _ = policy.forward(
            obs, deterministic=True, clip_action=True
        )
        obs, reward, done, info = env.step(actions)

        # Expose the policy outputs to each sub-env (consumed by render).
        for env_idx in range(env.num_envs):
            env.set_attr("action_value", values[env_idx], indices=env_idx)
            env.set_attr("action_log_probs", log_probs[env_idx], indices=env_idx)
            env.set_attr("action_mu", mu[env_idx], indices=env_idx)
            env.set_attr("action_sigma", sigma[env_idx], indices=env_idx)

        frames.append(env.render(mode="rgb_array"))

        step_count += 1
        env_done |= done

        # Harvest stats/events from every sub-env that just finished an episode.
        for env_idx in np.where(done)[0]:
            ep_stat_buffer.append(info[env_idx]["episode_stat"])
            ep_events[f"venv_{env_idx}"].append(info[env_idx]["episode_event"])
            timeout_count += int(info[env_idx]["timeout"])

    # conda install x264 ffmpeg -c conda-forge
    encoder = ImageEncoder(video_path, frames[0].shape, 30, 30)
    for frame in frames:
        encoder.capture_frame(frame)
    encoder.close()

    avg_ep_stat = get_avg_ep_stat(ep_stat_buffer, prefix="eval/")
    avg_ep_stat["eval/eval_timeout"] = timeout_count

    duration = time.time() - start_time
    avg_ep_stat["time/t_eval"] = duration
    avg_ep_stat["time/fps_eval"] = step_count * env.num_envs / duration

    # Restore training mode and leave the env freshly reset for the caller.
    for env_idx in range(env.num_envs):
        env.set_attr("eval_mode", False, indices=env_idx)
    obs = env.reset()
    return avg_ep_stat, ep_events


def get_avg_ep_stat(ep_stat_buffer, prefix=""):
    """Average a list of per-episode statistic dicts into a single dict.

    Every key appearing in any episode dict is summed across episodes and
    divided by the TOTAL number of episodes (keys missing from some episodes
    are treated as contributing 0 for those episodes).

    Args:
        ep_stat_buffer: list of dicts mapping stat name -> numeric value.
        prefix: string prepended to every output key.

    Returns:
        Dict of averaged stats plus "{prefix}n_episodes" (float count).
        Empty dict if `ep_stat_buffer` is empty.
    """
    if not ep_stat_buffer:
        return {}

    sums = {}
    for ep_info in ep_stat_buffer:
        for k, v in ep_info.items():
            k_avg = f"{prefix}{k}"
            # Non-inplace addition: the original `+=`/`/=` mutated mutable
            # stat values (e.g. numpy arrays) in place, corrupting the
            # caller's ep_stat_buffer entries via the stored reference.
            sums[k_avg] = sums[k_avg] + v if k_avg in sums else v

    n_episodes = float(len(ep_stat_buffer))
    avg_ep_stat = {k: v / n_episodes for k, v in sums.items()}
    avg_ep_stat[f"{prefix}n_episodes"] = n_episodes
    return avg_ep_stat


if __name__ == "__main__":
    from driving_gym.examples.carla_env_roach import make_carla_env

    from agents.rl_birdview.models.ppo_policy import PpoPolicy

    # Endless Town03 scenario with dense traffic for evaluation.
    env_config = {
        "map_name": "Town03",
        "num_vehicles": [100, 120],
        "num_walkers": [120, 160],
        "max_steps": 1500,
        "endless": True,
    }
    carla_env = make_carla_env(config=env_config, roach_wrapper=True)
    env = DummyVecEnv([lambda: carla_env])
    env.render_mode = "rgb_array"

    # Load a trained PPO checkpoint (weights file expected in the CWD).
    policy, _ = PpoPolicy.load("ckpt_11833344.pth")

    evaluate_policy(
        env,
        policy,
        video_path="eval_video.mp4",
        min_eval_steps=1500,
    )

    env.close()
