from typing import TYPE_CHECKING

import cv2
import gymnasium as gym
import numpy as np

if TYPE_CHECKING:
    from driving_gym.environment.env import DrivingGym


class RlBirdviewWrapper(gym.Wrapper):
    """Expose a single actor's ``roach`` observation as a flat gym interface.

    The wrapped multi-agent environment returns per-actor dict observations;
    this wrapper selects one actor (``actor_id``), concatenates the requested
    scalar observations into a single ``state`` vector, and forwards the
    bird's-eye-view masks as ``birdview``. It also records enough per-step
    information so that ``render()`` can overlay policy diagnostics
    (value, log-prob, mu/sigma) on the rendered frame.
    """

    env: "DrivingGym"
    render_mode: str = "rgb_array"

    def __init__(
        self,
        env: gym.Env,
        actor_id="hero",
        input_states=None,
        eval_mode=False,
    ):
        """Wrap *env* and build the single-agent observation/action spaces.

        Args:
            env: Multi-agent driving environment (``DrivingGym``).
            actor_id: Key of the controlled actor in the multi-agent dicts.
            input_states: Names of scalar observations to stack into the
                ``state`` vector, a subset of ``["speed", "speed_limit",
                "control", "acc_xy", "vel_xy", "vel_ang_z"]``. ``None``
                (default) means an empty selection.
            eval_mode: Forwarded to the actor's done handler on ``reset``.

        Raises:
            ValueError: If ``input_states`` selects no known state entries
                (the ``state`` Box space would be empty).
        """
        super().__init__(env)
        self._ev_id = actor_id
        # None sentinel instead of a mutable [] default: a literal list
        # default would be shared across every instance of the wrapper.
        self._input_states = [] if input_states is None else input_states
        self._eval_mode = eval_mode
        self._render_dict = {}
        self._ep_reward = 0.0
        self._ep_step = 0

        origin_space = self.env.unwrapped.observation_space[self._ev_id]["roach"]
        # Policy diagnostics; overwritten externally by the training loop and
        # drawn onto the frame by render().
        self.action_value = 0.0
        self.action_log_probs = 0.0
        self.action_mu = np.array([0.0, 0.0])
        self.action_sigma = np.array([0.0, 0.0])

        # Keep this selection logic in sync with process_obs() below.
        state_spaces = []
        if "speed" in self._input_states:
            state_spaces.append(origin_space["speed"]["speed_xy"])
        if "speed_limit" in self._input_states:
            state_spaces.append(origin_space["control"]["speed_limit"])
        if "control" in self._input_states:
            state_spaces.append(origin_space["control"]["throttle"])
            state_spaces.append(origin_space["control"]["steer"])
            state_spaces.append(origin_space["control"]["brake"])
            state_spaces.append(origin_space["control"]["gear"])
        if "acc_xy" in self._input_states:
            state_spaces.append(origin_space["velocity"]["acc_xy"])
        if "vel_xy" in self._input_states:
            state_spaces.append(origin_space["velocity"]["vel_xy"])
        if "vel_ang_z" in self._input_states:
            state_spaces.append(origin_space["velocity"]["vel_ang_z"])
        if not state_spaces:
            # Fail with a clear message instead of np.concatenate's cryptic
            # "need at least one array to concatenate".
            raise ValueError(
                "input_states selected no known state entries; expected a "
                "non-empty subset of ['speed', 'speed_limit', 'control', "
                "'acc_xy', 'vel_xy', 'vel_ang_z']"
            )

        state_low = np.concatenate([s.low for s in state_spaces])
        state_high = np.concatenate([s.high for s in state_spaces])

        self.observation_space = gym.spaces.Dict(
            {
                "state": gym.spaces.Box(
                    low=state_low, high=state_high, dtype=np.float32
                ),
                "birdview": origin_space["birdview"]["masks"],
            }
        )
        self.action_space = self.env.unwrapped.action_space[self._ev_id]

    def reset(self, *, seed=None, options=None):
        """Reset the underlying env and return the processed observation.

        Returns:
            Tuple of (processed observation dict, empty info dict).
        """
        self._ep_reward = 0.0
        self._ep_step = 0

        env: "DrivingGym" = self.env.unwrapped
        # NOTE(review): the config key is spelled "eval_model" (not
        # "eval_mode"); kept byte-identical since the done handler reads
        # this exact key — confirm against DoneHandler.
        env.agents[self._ev_id].done_handler.config["eval_model"] = self._eval_mode

        # NOTE(review): seed/options are accepted but not forwarded to the
        # underlying reset — confirm whether DrivingGym.reset supports them.
        obs_ma, _ = env.reset()
        obs = self.process_obs(obs_ma[self._ev_id]["roach"], self._input_states)

        # Cache obs/image so the *next* step() can report them as the frame
        # the agent acted on.
        self._render_dict["prev_obs"] = obs
        self._render_dict["prev_im_render"] = obs_ma[self._ev_id]["roach"]["birdview"]["rendered"]  # fmt: skip
        return obs, {}

    def step(self, action):
        """Step the controlled actor and return its single-agent transition.

        On episode end, copies ``episode_stat`` (when present) into a
        Monitor-style ``info["episode"]`` record with cumulative reward
        ``"r"`` and length ``"l"``.
        """
        self._ep_step += 1
        action_ma = {self._ev_id: action}

        obs_ma, reward_ma, te_ma, tr_ma, info_ma = self.env.unwrapped.step(action_ma)

        obs = self.process_obs(obs_ma[self._ev_id]["roach"], self._input_states)
        reward = reward_ma[self._ev_id]
        terminate = te_ma[self._ev_id]
        truncate = tr_ma[self._ev_id]
        info = info_ma[self._ev_id]
        self._ep_reward += reward

        # The render dict lags one step: "obs"/"im_render" describe the frame
        # the agent acted on, "prev_*" the frame just produced.
        self._render_dict = {
            # Access through unwrapped for consistency with reset()/step()
            # above (attribute passthrough on gym.Wrapper is deprecated in
            # gymnasium).
            "timestamp": self.env.unwrapped.agents[self._ev_id].reward_handler._handler._timestamp,  # fmt: skip
            "obs": self._render_dict["prev_obs"],
            "prev_obs": obs,
            "im_render": self._render_dict["prev_im_render"],
            "prev_im_render": obs_ma[self._ev_id]["roach"]["birdview"]["rendered"],
            "action": action,
            "reward_debug": {"debug_texts": []},
            "terminal_debug": {"debug_texts": []},
        }
        if (terminate or truncate) and "episode_stat" in info:
            info["episode"] = info["episode_stat"].copy()
            info["episode"]["r"] = self._ep_reward
            info["episode"]["l"] = self._ep_step

        return obs, reward, terminate, truncate, info

    def render(self):
        """Return the debug frame for the last step.

        Only valid after at least one ``step()``; before that the render
        dict holds no "im_render" entry and this raises ``KeyError``.
        """
        self._render_dict["action_value"] = self.action_value
        self._render_dict["action_log_probs"] = self.action_log_probs
        self._render_dict["action_mu"] = self.action_mu
        self._render_dict["action_sigma"] = self.action_sigma
        return self.im_render(self._render_dict)

    @staticmethod
    def im_render(render_dict):
        """Compose a side-by-side debug image: birdview left, overlays right.

        Args:
            render_dict: Dict produced by ``step()``/``render()`` containing
                the rendered birdview, action, diagnostics, and debug texts.

        Returns:
            uint8 image of shape (h, 2*w, c).
        """
        im_birdview = render_dict["im_render"]
        h, w, c = im_birdview.shape
        # Double the width: left half is the birdview, right half is blank
        # space for the debug text overlays.
        im = np.zeros([h, w * 2, c], dtype=np.uint8)
        im[:h, :w] = im_birdview

        action_str = str(render_dict["action"])
        mu_str = str(render_dict["action_mu"])
        sigma_str = str(render_dict["action_sigma"])
        state_str = np.array2string(
            render_dict["obs"]["state"], precision=2, separator=",", suppress_small=True
        )

        txt_t = f'step:{render_dict["timestamp"]["step"]:5}, frame:{render_dict["timestamp"]["frame"]:5}'
        im = cv2.putText(
            im, txt_t, (3, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1
        )
        txt_1 = f'a{action_str} v:{render_dict["action_value"]:5.2f} p:{render_dict["action_log_probs"]:5.2f}'
        im = cv2.putText(
            im, txt_1, (3, 24), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1
        )
        txt_2 = f"s{state_str}"
        im = cv2.putText(
            im, txt_2, (3, 36), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1
        )

        txt_3 = f"a{mu_str} b{sigma_str}"
        im = cv2.putText(
            im, txt_3, (w, 12), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1
        )
        # Reward/terminal debug texts stack below the mu/sigma line.
        for i, txt in enumerate(
            render_dict["reward_debug"]["debug_texts"]
            + render_dict["terminal_debug"]["debug_texts"]
        ):
            im = cv2.putText(
                im,
                txt,
                (w, (i + 2) * 12),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.3,
                (255, 255, 255),
                1,
            )
        return im

    @staticmethod
    def process_obs(obs, input_states, train=True):
        """Flatten a raw ``roach`` observation into the wrapper's obs dict.

        Args:
            obs: Raw per-actor ``roach`` observation dict.
            input_states: Same selection list used in ``__init__``.
            train: When False, a leading batch dimension of size 1 is added
                to both ``state`` and ``birdview``.

        Returns:
            Dict with float32 ``state`` vector and ``birdview`` masks.
        """
        state_list = []
        if "speed" in input_states:
            state_list.append(obs["speed"]["speed_xy"])
        if "speed_limit" in input_states:
            state_list.append(obs["control"]["speed_limit"])
        if "control" in input_states:
            state_list.append(obs["control"]["throttle"])
            state_list.append(obs["control"]["steer"])
            state_list.append(obs["control"]["brake"])
            # NOTE(review): gear is normalized by 5.0 here but the declared
            # observation space uses the raw gear bounds — confirm intended.
            state_list.append(obs["control"]["gear"] / 5.0)
        if "acc_xy" in input_states:
            state_list.append(obs["velocity"]["acc_xy"])
        if "vel_xy" in input_states:
            state_list.append(obs["velocity"]["vel_xy"])
        if "vel_ang_z" in input_states:
            state_list.append(obs["velocity"]["vel_ang_z"])

        state = np.concatenate(state_list)

        birdview = obs["birdview"]["masks"]

        if not train:
            birdview = np.expand_dims(birdview, 0)
            state = np.expand_dims(state, 0)

        obs_dict = {"state": state.astype(np.float32), "birdview": birdview}
        return obs_dict


if __name__ == "__main__":
    # Smoke test: drive the wrapped env with random actions and display the
    # debug render. cv2 is already imported at module level, so the former
    # redundant local "import cv2" has been dropped.
    from driving_gym.examples.carla_env_roach import make_carla_env

    env = RlBirdviewWrapper(
        make_carla_env(config=dict(autopilot=True, endless=True)),
        input_states=["control", "vel_xy"],
    )

    try:
        for _ in range(10):
            obs, info = env.reset()
            done = False

            while not done:
                action = env.action_space.sample()
                obs, reward, te, tr, info = env.step(action)
                done = te or tr
                img = env.render()
                # im_render draws in RGB; convert for cv2's BGR display.
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                cv2.imshow("Roach", img)
                cv2.waitKey(1)
    finally:
        # Release the simulator connection even if rendering/stepping fails.
        env.close()