from __future__ import annotations

import numpy as np

from driving_gym.environment.agent.reward.base_reward import BaseReward
from driving_gym.environment.scenario.carla_scenario import CarlaScenario
from driving_gym.simulation.adapter.carla.carla_adapter import CarlaAdapter
from driving_gym.simulation.common.hd_map import Waypoint


class CarlaDefaultReward(BaseReward):
    """Default dense reward for a CARLA driving agent.

    Combines collision, longitudinal speed, over-speed, lane-keeping,
    steering, and lateral-acceleration terms into a single scalar and
    also returns the individual terms for logging/diagnostics.
    """

    # Narrowed type for the adapter attribute assigned by BaseReward.
    adapter: CarlaAdapter

    def __init__(self, config, adapter=None):
        """Initialize the reward.

        Args:
            config: Reward configuration; ``self.params`` (populated by the
                base class) supplies ``desired_speed`` and
                ``out_lane_threshold``.
            adapter: Optional simulator adapter, forwarded to the base class.
        """
        super().__init__(config, adapter)

        # Target cruising speed (m/s); speeds above it are penalized.
        self.desired_speed = self.params.get("desired_speed", 6)
        # Lateral offset (m) from the route waypoint beyond which the agent
        # counts as out of lane (used only when a planner is available).
        self.out_lane_threshold = self.params.get("out_lane_threshold", 2.0)
        # Cumulative lane-invasion count observed so far this episode
        # (used only in the sensor-based lane check).
        self._num_lane_invade = 0

    def reset(self):
        """Reset per-episode state (the lane-invasion counter)."""
        self._num_lane_invade = 0
        return super().reset()

    def get_reward(self, snapshot, scenario: CarlaScenario):
        """Compute the reward for the current simulation snapshot.

        Args:
            snapshot: Simulation snapshot; ``snapshot.data[self.actor_id]``
                must provide ``"transform"``, ``"velocity"`` and, when
                default sensors are attached, ``"sensors"``.
            scenario: Active scenario; its ``enable_planner`` /
                ``attach_default_sensors`` flags select which lane check
                and collision check are available.

        Returns:
            Tuple ``(reward, terms)`` where ``reward`` is the weighted sum
            and ``terms`` is a dict of the individual (unweighted) terms.
        """
        transform = snapshot.data[self.actor_id]["transform"]
        velocity = snapshot.data[self.actor_id]["velocity"]
        control = self.adapter.get_control(self.actor_id)

        # Speed-tracking term. NOTE: reported in the info dict only — the
        # weighted total below uses the raw longitudinal speed instead.
        speed = velocity.length()
        r_speed = -abs(speed - self.desired_speed)

        # Collision penalty (requires the default collision sensor).
        r_collision = 0
        if scenario.attach_default_sensors:
            collision = snapshot.data[self.actor_id]["sensors"]["collision"]
            if np.sum(collision) > 0:
                r_collision = -1

        # Squared steering magnitude; weighted negatively in the total, so
        # large steering angles are penalized.
        r_steer = control.steer**2

        # Lane-keeping penalty.
        r_lane = 0
        if scenario.enable_planner:
            # Planner available: penalize when the lateral offset from the
            # nearest route waypoint exceeds the threshold.
            ego_loc = transform.location
            planner = scenario.planners[self.actor_id]
            wpt = Waypoint.from_carla_waypoint(planner.get_nearest_waypoints()[0])
            offset = wpt.transform.inverse_transform_location(ego_loc).y
            if abs(offset) > self.out_lane_threshold:
                r_lane = -1
        elif scenario.attach_default_sensors:
            # Fallback: penalize each lane invasion reported since the last
            # step. BUG FIX: the delta must be negated — previously new
            # invasions produced a *positive* r_lane, which (with the +1
            # weight below) rewarded lane invasion, inconsistent with the
            # planner branch above where leaving the lane yields -1.
            data = snapshot.data[self.actor_id]["sensors"]["lane_invasion"]
            lane_invade = np.sum(data)
            r_lane = -(lane_invade - self._num_lane_invade)
            self._num_lane_invade = lane_invade

        # Over-speed penalty based on longitudinal (forward) speed.
        r_fast = 0
        lspeed = velocity.as_numpy_array()
        forward_vec = transform.get_forward_vector().as_numpy_array()
        lon_speed = np.dot(lspeed, forward_vec)
        if lon_speed > self.desired_speed:
            r_fast = -1

        # Lateral-acceleration cost: steering while moving fast is expensive.
        r_lat = -abs(control.steer) * lon_speed**2

        r = (
            200 * r_collision
            + 1 * lon_speed
            + 10 * r_fast
            + 1 * r_lane
            + r_steer * -0.5
            + 0.2 * r_lat
            - 0.1
        )

        return r, {
            "collision_reward": r_collision,
            "steer_reward": r_steer,
            "lane_reward": r_lane,
            "speed_reward": r_speed,
            "fast_reward": r_fast,
            "lat_reward": r_lat,
        }
