import logging
import time
from collections import deque
from pathlib import Path
from typing import Dict

import gymnasium as gym
import hydra
import numpy as np
import torch as th
import wandb
from gym.wrappers.monitoring.video_recorder import ImageEncoder
from omegaconf import DictConfig, OmegaConf
from stable_baselines3 import A2C, DDPG, PPO, SAC, TD3
from stable_baselines3.common.callbacks import (
    BaseCallback,
    CallbackList,
    CheckpointCallback,
)
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.vec_env.base_vec_env import VecEnv
from torch import nn

from driving_gym.examples.carla_env_roach import make_carla_env
from driving_gym.simulation.adapter.carla.utils import start_local_server

log = logging.getLogger(__name__)


class SB3Callback(BaseCallback):
    """SB3 training callback that streams rollout statistics, periodic
    evaluation videos and model checkpoints to Weights & Biases.

    NOTE(review): constructing this callback calls ``wandb.init`` -- it has
    the side effect of starting and configuring a wandb run.
    """

    def __init__(self, cfg, vec_env: VecEnv):
        super(SB3Callback, self).__init__(verbose=1)

        # Output directories relative to the current working directory
        # (hydra changes cwd to its run dir before this is constructed).
        self._video_path = Path("video")
        self._video_path.mkdir(parents=True, exist_ok=True)
        self._ckpt_dir = Path("ckpt")
        self._ckpt_dir.mkdir(parents=True, exist_ok=True)

        wandb.init(
            project=cfg.wb_project,
            name=cfg.wb_name,
            notes=cfg.wb_notes,
            tags=cfg.wb_tags,
        )
        wandb.config.update(OmegaConf.to_container(cfg))

        # Upload the resolved configs so the run is reproducible from wandb.
        wandb.save("./config_agent.yaml")
        wandb.save(".hydra/*")

        self.vec_env = vec_env

        # Evaluate (and checkpoint) every 10k environment steps.
        self._eval_step = int(1e4)
        self.ep_stat_buffer = deque(maxlen=100)

    def _init_callback(self):
        # Called by SB3 once self.model is set, just before training starts.
        self.n_epoch = 0
        self._last_time_buffer = self.model.num_timesteps
        self._last_time_eval = self.model.num_timesteps

    def _on_step(self) -> bool:
        # Returning True keeps training running.
        return True

    def _on_training_start(self) -> None:
        pass

    def _on_rollout_start(self):
        pass

    def _on_rollout_end(self):
        """Log rollout/timing stats; every ``_eval_step`` env steps also run
        an evaluation, upload its video and save a model checkpoint."""
        # save rollout statistics
        avg_ep_stat = self.get_avg_ep_stat(self.model.ep_info_buffer, prefix="rollout/")
        wandb.log(avg_ep_stat, step=self.model.num_timesteps)

        print(f"n_epoch: {self.n_epoch}, num_timesteps: {self.model.num_timesteps}")
        # save time
        # NOTE(review): assumes model.start_time is a time.time() timestamp in
        # seconds (older SB3); newer SB3 stores time.time_ns() -- confirm the
        # pinned SB3 version, otherwise these timing metrics are off by 1e9.
        time_elapsed = time.time() - self.model.start_time
        wandb.log(
            {
                "time/n_epoch": self.n_epoch,
                "time/sec_per_epoch": time_elapsed / (self.n_epoch + 1),
                "time/fps": self.model.num_timesteps / time_elapsed,
                "time/n_updates": self.model._n_updates,
            },
            step=self.model.num_timesteps,
        )

        # evaluate and save checkpoint
        if self.model.num_timesteps - self._last_time_eval >= self._eval_step:
            self._last_time_eval = self.model.num_timesteps
            eval_video_path = (
                self._video_path / f"eval_{self.model.num_timesteps}.mp4"
            ).as_posix()
            avg_ep_stat, ep_events = self.evaluate_policy(
                self.vec_env, self.model.policy, eval_video_path
            )
            # log to wandb
            wandb.log(
                {f"video/{self.model.num_timesteps}": wandb.Video(eval_video_path)},
                step=self.model.num_timesteps,
            )
            wandb.log(avg_ep_stat, step=self.model.num_timesteps)

            # save model
            ckpt_path = (
                self._ckpt_dir / f"ckpt_{self.model.num_timesteps}.pth"
            ).as_posix()
            self.model.save(ckpt_path)
            wandb.save(f"{ckpt_path}")
        self.n_epoch += 1

    @staticmethod
    def evaluate_policy(
        env: VecEnv, policy: BasePolicy, video_path: str, min_eval_steps: int = 3000
    ):
        """Roll out ``policy`` deterministically on ``env``, write an mp4 to
        ``video_path`` and return ``(avg_ep_stat, ep_events)``.

        Runs at least ``min_eval_steps`` steps AND until every sub-env has
        finished at least one episode. Assumes each sub-env exposes an
        ``eval_mode`` attribute and that ``info`` carries ``episode_stat`` /
        ``episode_event`` / ``timeout`` on episode end -- TODO confirm
        against the env wrapper.
        """
        policy = policy.eval()  # switch policy networks to eval mode
        t0 = time.time()
        for i in range(env.num_envs):
            env.set_attr("eval_mode", True, indices=i)
        obs = env.reset()

        list_render = []
        ep_stat_buffer = []
        ep_events = {}
        for i in range(env.num_envs):
            ep_events[f"venv_{i}"] = []

        n_step = 0
        n_timeout = 0
        # One "done at least once" flag per sub-env.
        env_done = np.array([False] * env.num_envs)

        while n_step < min_eval_steps or not np.all(env_done):
            actions, state = policy.predict(obs, deterministic=True)
            obs, reward, done, info = env.step(actions)

            list_render.append(env.render(mode="rgb_array"))

            n_step += 1
            env_done |= done

            # Collect per-episode stats from every sub-env that just finished.
            for i in np.where(done)[0]:
                ep_stat_buffer.append(info[i]["episode_stat"])
                ep_events[f"venv_{i}"].append(info[i]["episode_event"])
                n_timeout += int(info[i]["timeout"])

        # conda install x264=='1!152.20180717' ffmpeg=4.0.2 -c conda-forge
        encoder = ImageEncoder(video_path, list_render[0].shape, 30, 30)
        for im in list_render:
            encoder.capture_frame(im)
        encoder.close()

        avg_ep_stat = SB3Callback.get_avg_ep_stat(ep_stat_buffer, prefix="eval/")
        avg_ep_stat["eval/eval_timeout"] = n_timeout

        duration = time.time() - t0
        avg_ep_stat["time/t_eval"] = duration
        avg_ep_stat["time/fps_eval"] = n_step * env.num_envs / duration

        # Restore training mode and reset so the caller resumes cleanly.
        for i in range(env.num_envs):
            env.set_attr("eval_mode", False, indices=i)
        obs = env.reset()
        return avg_ep_stat, ep_events

    @staticmethod
    def get_avg_ep_stat(ep_stat_buffer, prefix=""):
        """Average a list of per-episode stat dicts key-wise.

        Values are summed across episodes and divided by the episode count;
        a ``<prefix>n_episodes`` entry is appended. Returns ``{}`` when the
        buffer is empty.
        """
        avg_ep_stat = {}
        if len(ep_stat_buffer) > 0:
            for ep_info in ep_stat_buffer:
                for k, v in ep_info.items():
                    k_avg = f"{prefix}{k}"
                    if k_avg in avg_ep_stat:
                        avg_ep_stat[k_avg] += v
                    else:
                        avg_ep_stat[k_avg] = v

            n_episodes = float(len(ep_stat_buffer))
            for k in avg_ep_stat.keys():
                avg_ep_stat[k] /= n_episodes
            avg_ep_stat[f"{prefix}n_episodes"] = n_episodes

        return avg_ep_stat


class XtMaCNNFeatureExtractor(BaseFeaturesExtractor):
    """
    Feature extractor based on XtMaCNN for autonomous driving.
    Adapts the provided CNN for use with Stable-Baselines3.

    Expects a Dict observation space with:
      - "birdview": image tensor, channels-first (uint8, normalized here)
      - "state": flat measurement vector

    The birdview passes through a 6-layer CNN, the state through an MLP
    (``states_neurons``); both are concatenated and projected to
    ``features_dim``.
    """

    def __init__(
        self,
        observation_space: gym.spaces.Dict,
        features_dim: int = 256,
        states_neurons=(256,),
    ):
        # Default was a mutable list ([256]); a tuple avoids the shared
        # mutable-default pitfall while staying backward compatible.
        super().__init__(observation_space, features_dim)

        # Extract dimensions from the observation space.
        n_input_channels = observation_space.spaces["birdview"].shape[0]
        state_dim = observation_space.spaces["state"].shape[0]

        # CNN for processing birdview
        self.cnn = nn.Sequential(
            nn.Conv2d(n_input_channels, 8, kernel_size=5, stride=2),
            nn.ReLU(),
            nn.Conv2d(8, 16, kernel_size=5, stride=2),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=5, stride=2),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
        )

        # Compute the flattened CNN output size with one dummy forward pass.
        with th.no_grad():
            sample_birdview = th.as_tensor(
                observation_space.spaces["birdview"].sample()[None]
            ).float()
            n_flatten = self.cnn(sample_birdview).shape[1]

        # State processing network: MLP with the requested hidden sizes.
        states_neurons = [state_dim] + list(states_neurons)
        state_linear_layers = []
        for i in range(len(states_neurons) - 1):
            state_linear_layers.append(
                nn.Linear(states_neurons[i], states_neurons[i + 1])
            )
            state_linear_layers.append(nn.ReLU())
        self.state_linear = nn.Sequential(*state_linear_layers)

        # Combined features network projecting to features_dim.
        self.linear = nn.Sequential(
            nn.Linear(n_flatten + states_neurons[-1], 512),
            nn.ReLU(),
            nn.Linear(512, features_dim),
            nn.ReLU(),
        )

        # Initialize conv weights (Xavier uniform, small constant bias).
        self.apply(self._weights_init)

    @staticmethod
    def _weights_init(m):
        # Only conv layers are re-initialized; linear layers keep the
        # PyTorch default init.
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain("relu"))
            nn.init.constant_(m.bias, 0.1)

    def forward(self, observations: Dict[str, th.Tensor]) -> th.Tensor:
        """Return the combined (batch, features_dim) feature tensor."""
        # Process birdview (normalize uint8 pixel values to [0, 1]).
        birdview = observations["birdview"].float() / 255.0
        state = observations["state"]

        # Extract CNN features
        cnn_features = self.cnn(birdview)

        # Process state vector
        state_features = self.state_linear(state)

        # Combine features
        combined = th.cat([cnn_features, state_features], dim=1)
        features = self.linear(combined)

        return features


def kill_carla():
    """Best-effort kill of any running CARLA server processes.

    ``killall -9 -r`` sends SIGKILL to every process whose name matches the
    regex ``CarlaUE4-Linux``. Failures (no matching process, or ``killall``
    not installed) are ignored so this stays a best-effort cleanup, matching
    the original shell-based behavior.
    """
    import subprocess

    try:
        # argv list with shell=False: no shell parsing, same killall call.
        subprocess.run(["killall", "-9", "-r", "CarlaUE4-Linux"], check=False)
    except FileNotFoundError:
        # killall binary not available on this system; nothing to do.
        pass
    time.sleep(1)  # give the killed servers a moment to release ports
    log.info("Kill Carla Servers!")


@hydra.main(config_path="config", config_name="train_sb3")
def main(cfg: DictConfig):
    """Train an SB3 A2C agent on CARLA, resuming from the latest wandb
    checkpoint when ``outputs/checkpoint.txt`` points at a previous run."""
    start_local_server(2000)
    time.sleep(5)  # give the CARLA server time to come up

    set_random_seed(cfg.seed, using_cuda=True)

    env_config = {
        "map_name": "Town03",
        "num_vehicles": [0, 120],
        "num_walkers": [0, 120],
        "endless": True,
    }
    env = make_carla_env(env_config, roach_wrapper=True)
    env = DummyVecEnv([lambda: env])
    env.render_mode = "rgb_array"

    # TODO: Make SB3 algorithm configurable
    agent = A2C(
        "MultiInputPolicy",
        env,
        learning_rate=1e-5,
        n_steps=384,
        gae_lambda=0.9,
        ent_coef=0.01,
        vf_coef=0.5,
        policy_kwargs={
            "features_extractor_class": XtMaCNNFeatureExtractor,
            "features_extractor_kwargs": {"features_dim": 512},
            "net_arch": [dict(pi=[256, 256], vf=[256, 256])],
        },
    )

    # outputs/checkpoint.txt stores the wandb run path of the previous run.
    last_checkpoint_path = (
        Path(hydra.utils.get_original_cwd()) / "outputs" / "checkpoint.txt"
    )
    if last_checkpoint_path.exists():
        with open(last_checkpoint_path, "r") as f:
            wb_run_path = f.read()
        api = wandb.Api()
        run = api.run(wb_run_path)
        all_ckpts = [f for f in run.files() if "ckpt" in f.name]
        if all_ckpts:
            # Pick the checkpoint with the highest timestep in its name
            # (ckpt_<num_timesteps>.pth).
            latest_ckpt = max(
                all_ckpts, key=lambda x: int(x.name.split("_")[1].split(".")[0])
            )
            log.info(f"Resume checkpoint latest {latest_ckpt.name}")

            latest_ckpt.download(replace=True)
            # BUG FIX: BaseAlgorithm.load is a classmethod that RETURNS a new
            # model; the old `agent.load(...)` discarded it, so resume never
            # took effect. Rebind the agent to the loaded model instead.
            agent = A2C.load(latest_ckpt.name, env=env)
            log.info(f"Resumed timesteps: {agent.num_timesteps}")

    # wandb init (SB3Callback.__init__ calls wandb.init)
    wb_callback = SB3Callback(cfg, env)
    ckpt_callback = CheckpointCallback(10000, "sb3_ckpt", type(agent).__name__)
    callback = CallbackList([wb_callback, ckpt_callback])

    # save wandb run path to file such that bash file can find it
    last_checkpoint_path.parent.mkdir(parents=True, exist_ok=True)
    with open(last_checkpoint_path, "w") as f:
        f.write(wandb.run.path)

    agent.learn(
        total_timesteps=int(cfg.total_timesteps),
        callback=callback,
        log_interval=4,
        reset_num_timesteps=False,
    )

    agent.save("sb3_ckpt/final_model.zip")
    env.close()


if __name__ == "__main__":
    # Kill any stale CARLA servers before main() starts a fresh one.
    kill_carla()
    main()
    log.info("train_sb3.py DONE!")