from __future__ import annotations

from collections import defaultdict

import numpy as np

from driving_gym.environment.agent.reward.base_reward import BaseReward
from driving_gym.environment.scenario.carla_scenario import CarlaScenario
from driving_gym.misc.roach.core.task_actor.ego_vehicle.ego_vehicle_handler import (
    EgoVehicleHandler,
    TaskVehicle,
)
from driving_gym.misc.roach.core.task_actor.ego_vehicle.reward.valeo_action import (
    ValeoAction,
)
from driving_gym.misc.roach.core.task_actor.ego_vehicle.terminal.valeo_no_det_px import (
    ValeoNoDetPx,
)
from driving_gym.misc.roach.utils.traffic_light import TrafficLightHandler
from driving_gym.simulation.adapter.carla.carla_adapter import CarlaAdapter
from driving_gym.simulation.adapter.carla.carla_config import (
    CarlaActorConfig,
    carla_util,
)

# Multiplicative penalty factors, one application per infraction event, used
# when composing the episode-level "score_penalty" (leaderboard-style scoring:
# each infraction scales the score by its factor, so lower = harsher penalty).
PENALTY_COLLISION_PEDESTRIAN = 0.50
PENALTY_COLLISION_VEHICLE = 0.60
PENALTY_COLLISION_STATIC = 0.65
PENALTY_TRAFFIC_LIGHT = 0.70
PENALTY_STOP = 0.80


class CarlaRoachReward(BaseReward):
    """Roach-style dense reward and episode statistics for a CARLA ego vehicle.

    Delegates per-step reward computation to Roach's ``ValeoAction`` handler
    and termination detection to ``ValeoNoDetPx``, while collecting the
    infraction events reported by the ego vehicle's criteria and aggregating
    them into leaderboard-style end-of-episode statistics (route score,
    multiplicative penalty score, composed score, per-km infraction rates).
    """

    # Provided by the surrounding framework (via BaseReward); gives access to
    # the CARLA world, map and spawned actors.
    adapter: CarlaAdapter

    def __init__(self, config, adapter=None):
        """Initialize empty episode buffers.

        Args:
            config: Reward configuration forwarded to ``BaseReward``;
                ``params["collision_reward"]`` is read here (default -100).
            adapter: Optional ``CarlaAdapter`` forwarded to ``BaseReward``.
        """
        super().__init__(config, adapter)

        # Per-step scalar rewards collected over the current episode.
        self.reward_buffers = []
        # Maps event-category name -> list of infraction event dicts.
        self.info_buffers = defaultdict(list)
        # Both handlers are created lazily on the first get_reward() call of
        # an episode — the scenario is only available at that point.
        self.reward_handler = None
        self.terminal_handler = None
        # NOTE(review): read from config but not referenced elsewhere in this
        # class — presumably consumed by a subclass or handler; confirm.
        self.collision_reward = self.params.get("collision_reward", -100)
        # True only while THIS instance created the shared TaskVehicle and is
        # therefore responsible for ticking it every step.
        self._tick_ego = False

    def reset(self):
        """Clear all episode state and record the episode-start timestamp."""
        self.reward_buffers.clear()
        self.info_buffers.clear()
        self.reward_handler = None
        self.terminal_handler = None
        # Fix: _tick_ego previously leaked across episodes. If a sibling
        # component registered the shared TaskVehicle first in a later
        # episode, this instance would still tick it as well, double-ticking
        # the ego vehicle. Ownership is re-established in _create_handler.
        self._tick_ego = False
        # TrafficLightHandler holds global state; initialize it exactly once.
        if TrafficLightHandler.carla_map is None:
            TrafficLightHandler.reset(self.adapter.world)
        # Invalidate the shared TaskVehicle so _create_handler builds a fresh
        # one for the new episode.
        TaskVehicle.VEHICLES[self.actor_id] = None

        snap_shot = self.adapter.world.get_snapshot()
        # Roach-style timestamp dict; the "relative_*" fields and "step" are
        # offsets from the episode-start values recorded here.
        self._timestamp = {
            "step": 0,
            "frame": snap_shot.timestamp.frame,
            "relative_wall_time": 0.0,
            "wall_time": snap_shot.timestamp.platform_timestamp,
            "relative_simulation_time": 0.0,
            "simulation_time": snap_shot.timestamp.elapsed_seconds,
            "start_frame": snap_shot.timestamp.frame,
            "start_wall_time": snap_shot.timestamp.platform_timestamp,
            "start_simulation_time": snap_shot.timestamp.elapsed_seconds,
        }
        return super().reset()

    def get_reward(self, snapshot, scenario: CarlaScenario):
        """Compute the reward for the current step.

        Args:
            snapshot: Unused here; part of the BaseReward interface.
            scenario: Active scenario; used to build the handlers lazily and
                to read ``max_steps``.

        Returns:
            Tuple ``(reward, detail)``. ``detail`` carries the handler's
            debug info plus a ``"timeout"`` flag and — when the episode
            ends — the aggregated ``"episode_event"`` / ``"episode_stat"``
            entries added by ``_patch_info``.
        """
        if self.reward_handler is None:
            self.reward_handler, self.terminal_handler = self._create_handler(scenario)

        self._update_timestamp()
        self._update_info_buffer()
        # Only the instance that created the TaskVehicle advances it.
        if self._tick_ego:
            self.reward_handler._ego_vehicle.tick(self._timestamp)

        done, timeout, terminal_reward, _ = self.terminal_handler.get(self._timestamp)
        reward, detail = self.reward_handler.get(terminal_reward)

        self.reward_buffers.append(reward)
        # Exceeding the scenario's step budget also counts as a timeout.
        detail["timeout"] = timeout or (self._timestamp["step"] >= scenario.max_steps)
        if done or detail["timeout"]:
            self._patch_info(detail)

        return reward, detail

    def _create_handler(self, scenario: CarlaScenario):
        """Build the Roach reward/terminal handlers for this episode.

        Reuses a ``TaskVehicle`` already registered for this actor id (by a
        sibling component); otherwise creates one from the scenario's route
        and takes over responsibility for ticking it each step.

        Returns:
            Tuple ``(reward_handler, terminal_handler)``.
        """
        ego_vehicle = self.adapter.get_actor(self.actor_id)
        carla_map = self.adapter._map

        task_vehicle = TaskVehicle.VEHICLES.get(self.actor_id, None)
        if task_vehicle is None:
            spawn_transforms = EgoVehicleHandler._get_spawn_points(carla_map)
            task = scenario.get_task(self.actor_id)
            # Fall back to a single-waypoint route ending at task["end"].
            route = task.get("route", None) or [task["end"]]
            target_transforms = [
                carla_util.carla_tranform(
                    CarlaActorConfig.parse_location(loc, carla_map)
                )
                for loc in route
            ]

            task_vehicle = TaskVehicle(
                ego_vehicle, target_transforms, spawn_transforms, scenario.endless
            )
            TaskVehicle.VEHICLES[self.actor_id] = task_vehicle
            # We created it, so we must tick it (see get_reward).
            self._tick_ego = True

        reward_handler = ValeoAction(task_vehicle)
        terminal_handler = ValeoNoDetPx(task_vehicle)
        return reward_handler, terminal_handler

    def _update_timestamp(self):
        """Refresh ``self._timestamp`` from the current world snapshot."""
        snap_shot = self.adapter.world.get_snapshot()
        # "step" counts simulation frames elapsed since reset(), not the
        # number of get_reward() calls.
        self._timestamp["step"] = (
            snap_shot.timestamp.frame - self._timestamp["start_frame"]
        )
        self._timestamp["frame"] = snap_shot.timestamp.frame
        self._timestamp["wall_time"] = snap_shot.timestamp.platform_timestamp
        self._timestamp["relative_wall_time"] = (
            self._timestamp["wall_time"] - self._timestamp["start_wall_time"]
        )
        self._timestamp["simulation_time"] = snap_shot.timestamp.elapsed_seconds
        self._timestamp["relative_simulation_time"] = (
            self._timestamp["simulation_time"]
            - self._timestamp["start_simulation_time"]
        )

    def _update_info_buffer(self):
        """Sort this step's infraction events into ``self.info_buffers``."""
        info = self.reward_handler._ego_vehicle.info_criteria.copy()

        collision = info["collision"]
        if collision:
            # collision_type: 0 = static layout, 1 = vehicle, 2 = pedestrian;
            # anything else is bucketed under "others".
            if collision["collision_type"] == 0:
                self.info_buffers["collisions_layout"].append(collision)
            elif collision["collision_type"] == 1:
                self.info_buffers["collisions_vehicle"].append(collision)
            elif collision["collision_type"] == 2:
                self.info_buffers["collisions_pedestrian"].append(collision)
            else:
                self.info_buffers["collisions_others"].append(collision)
        if info["run_red_light"]:
            self.info_buffers["red_light"].append(info["run_red_light"])
        if info["encounter_light"]:
            self.info_buffers["encounter_light"].append(info["encounter_light"])
        stop_sign = info["run_stop_sign"]
        if stop_sign:
            # "encounter" = reached a stop sign; "run" = drove through it.
            if stop_sign["event"] == "encounter":
                self.info_buffers["encounter_stop"].append(stop_sign)
            elif stop_sign["event"] == "run":
                self.info_buffers["stop_infraction"].append(stop_sign)
        if info["route_deviation"]:
            self.info_buffers["route_dev"].append(info["route_deviation"])
        if info["blocked"]:
            self.info_buffers["vehicle_blocked"].append(info["blocked"])
        outside = info["outside_route_lane"]
        if outside:
            # A single event may count toward both buckets.
            if outside["outside_lane"]:
                self.info_buffers["outside_lane"].append(outside)
            if outside["wrong_lane"]:
                self.info_buffers["wrong_lane"].append(outside)

    def _patch_info(self, info_dict):
        """Attach end-of-episode events and statistics to ``info_dict``.

        Computes leaderboard-style scores: ``score_route`` (route completion
        ratio, or km driven in endless mode), ``score_penalty``
        (multiplicative per-infraction penalties and lane-violation fraction),
        and their product ``score_composed``. Infraction counts are reported
        as rates per km driven.
        """
        info = self.reward_handler._ego_vehicle.info_criteria.copy()

        # NOTE: this aliases the live defaultdict (matching the Roach
        # reference behavior), so the two extra keys set below also live in
        # self.info_buffers until the next reset().
        info_dict["episode_event"] = self.info_buffers
        info_dict["episode_event"]["timeout"] = info_dict["timeout"]
        info_dict["episode_event"]["route_completion"] = info["route_completion"]

        # Convert meters -> km, clamped away from zero so the per-km rates
        # below never divide by zero.
        total_length = float(info["route_completion"]["route_length_in_m"]) / 1000
        completed_length = (
            float(info["route_completion"]["route_completed_in_m"]) / 1000
        )
        total_length = max(total_length, 0.001)
        completed_length = max(completed_length, 0.001)

        outside_lane_length = (
            np.sum([x["distance_traveled"] for x in self.info_buffers["outside_lane"]])
            / 1000
        )
        wrong_lane_length = (
            np.sum([x["distance_traveled"] for x in self.info_buffers["wrong_lane"]])
            / 1000
        )

        # Endless routes have no fixed length: score by distance driven (km).
        if self.reward_handler._ego_vehicle._endless:
            score_route = completed_length
        else:
            if info["route_completion"]["is_route_completed"]:
                score_route = 1.0
            else:
                score_route = completed_length / total_length

        n_collisions_layout = int(len(self.info_buffers["collisions_layout"]))
        n_collisions_vehicle = int(len(self.info_buffers["collisions_vehicle"]))
        n_collisions_pedestrian = int(len(self.info_buffers["collisions_pedestrian"]))
        n_collisions_others = int(len(self.info_buffers["collisions_others"]))
        n_red_light = int(len(self.info_buffers["red_light"]))
        n_encounter_light = int(len(self.info_buffers["encounter_light"]))
        n_stop_infraction = int(len(self.info_buffers["stop_infraction"]))
        n_encounter_stop = int(len(self.info_buffers["encounter_stop"]))
        n_collisions = (
            n_collisions_layout
            + n_collisions_vehicle
            + n_collisions_pedestrian
            + n_collisions_others
        )

        # Each infraction multiplies the penalty by its factor; the first
        # term discounts the fraction of distance driven outside/in the
        # wrong lane.
        score_penalty = (
            1.0
            * (1 - (outside_lane_length + wrong_lane_length) / completed_length)
            * (PENALTY_COLLISION_STATIC**n_collisions_layout)
            * (PENALTY_COLLISION_VEHICLE**n_collisions_vehicle)
            * (PENALTY_COLLISION_PEDESTRIAN**n_collisions_pedestrian)
            * (PENALTY_TRAFFIC_LIGHT**n_red_light)
            * (PENALTY_STOP**n_stop_infraction)
        )
        if info["route_completion"]["is_route_completed"] and n_collisions == 0:
            is_route_completed_nocrash = 1.0
        else:
            is_route_completed_nocrash = 0.0

        info_dict["episode_stat"] = {
            "score_route": score_route,
            "score_penalty": score_penalty,
            "score_composed": max(score_route * score_penalty, 0.0),
            "length": len(self.reward_buffers),
            "reward": np.sum(self.reward_buffers),
            "timeout": float(info_dict["timeout"]),
            "is_route_completed": float(info["route_completion"]["is_route_completed"]),
            "is_route_completed_nocrash": is_route_completed_nocrash,
            "route_completed_in_km": completed_length,
            "route_length_in_km": total_length,
            "percentage_outside_lane": outside_lane_length / completed_length,
            "percentage_wrong_lane": wrong_lane_length / completed_length,
            # Counts below are normalized per km of completed route.
            "collisions_layout": n_collisions_layout / completed_length,
            "collisions_vehicle": n_collisions_vehicle / completed_length,
            "collisions_pedestrian": n_collisions_pedestrian / completed_length,
            "collisions_others": n_collisions_others / completed_length,
            "red_light": n_red_light / completed_length,
            "light_passed": n_encounter_light - n_red_light,
            "encounter_light": n_encounter_light,
            "stop_infraction": n_stop_infraction / completed_length,
            "stop_passed": n_encounter_stop - n_stop_infraction,
            "encounter_stop": n_encounter_stop,
            "route_dev": len(self.info_buffers["route_dev"]) / completed_length,
            "vehicle_blocked": len(self.info_buffers["vehicle_blocked"])
            / completed_length,
        }