# rl_pygame_env.py
import pygame
import math
import numpy as np
import sys
import random
import gymnasium as gym
from gymnasium import spaces
from typing import Tuple

# ========== Parameters ==========
MAP_WIDTH, MAP_HEIGHT = 800, 600  # map size in pixels
# Static rectangular obstacles (left, top, width, height) in map coordinates.
OBSTACLES = [
    pygame.Rect(300, 200, 150, 100),
    pygame.Rect(550, 400, 120, 80),
    pygame.Rect(100, 400, 100, 60)
]
NUM_RAYS = 36          # number of lidar rays, evenly spread over 360 degrees
RAY_LENGTH = 150       # maximum ray range in pixels
RAY_STEP = 2           # ray-marching step in pixels
ROBOT_RADIUS = 15      # robot body radius in pixels
MAX_OMEGA_SPEED = 0.2  # angular speed corresponding to action = 1
MAX_WHEEL_SPEED = 5.0  # linear speed corresponding to action = 1 (pixels/sec)
DT = 0.5               # integration time step per env step
MAX_EPISODE_STEPS = 2000  # episode length cap


# ========== Differential-drive robot ==========
class DiffDriveRobot:
    """Unicycle-model robot: pose (x, y, theta) advanced by a linear speed v
    and an angular speed omega. The previous step's state is kept so the
    environment can compute smoothness penalties."""

    def __init__(self, x: float, y: float, theta: float = 0.0):
        self.x = x
        self.y = y
        self.theta = theta
        self.v = 0.0      # linear speed (pixels per time unit)
        self.omega = 0.0  # angular speed (rad per time unit)

        # Previous-step state, read by the environment's reward terms.
        self.old_x = self.x
        self.old_y = self.y
        self.old_theta = self.theta
        self.old_v = self.v
        # Fix: old_omega was previously only assigned inside step(), so
        # reading it before the first step() raised AttributeError.
        self.old_omega = self.omega

    def reset(self, x: float, y: float, theta: float):
        """Re-initialize the pose and zero all speeds and history.

        Note: a dead leftover assignment `self.v_l = 0.0` was removed;
        nothing in this file ever read it.
        """
        self.x = x
        self.y = y
        self.theta = theta
        self.v = 0.0
        self.omega = 0.0

        self.old_x = self.x
        self.old_y = self.y
        self.old_theta = self.theta
        self.old_v = self.v
        self.old_omega = self.omega

    def set_speeds(self, toward_v: float, omega: float):
        """Set the commanded linear and angular speeds."""
        self.v = toward_v
        self.omega = omega

    def step(self, dt: float = DT):
        """Advance the kinematics by dt and return [x, y, theta, v, omega]."""
        # Remember the previous state for smoothness-based rewards.
        self.old_x = self.x
        self.old_y = self.y
        self.old_theta = self.theta
        self.old_v = self.v
        self.old_omega = self.omega

        # Kinematic update; wrap heading into [0, 2*pi).
        # Fix: the old if/elif only corrected by a single revolution and the
        # comment wrongly claimed a [-pi, pi] range; modulo handles any
        # magnitude of omega * dt.
        self.theta = (self.theta + self.omega * dt) % (2 * math.pi)

        self.x += self.v * math.cos(self.theta) * dt
        self.y += self.v * math.sin(self.theta) * dt
        # Keep the robot inside the map bounds.
        self.x = float(np.clip(self.x, 0, MAP_WIDTH))
        self.y = float(np.clip(self.y, 0, MAP_HEIGHT))

        return [self.x, self.y, self.theta, self.v, self.omega]

# ========== RL environment (Gymnasium interface) ==========
class TopDownRaycastEnv(gym.Env):
    """Top-down 2D navigation environment.

    A differential-drive robot must reach a randomly placed target while
    avoiding static rectangular obstacles, sensing its surroundings with a
    360-degree lidar of `NUM_RAYS` rays.

    Observation (num_rays + 7,): normalized ray distances in [0, 1] followed
    by [dx, dy, dist_norm, vel_alignment, omega, theta_norm, v_norm].
    Action (2,): [forward-speed command, angular-speed command] in [-1, 1],
    scaled by MAX_WHEEL_SPEED / MAX_OMEGA_SPEED respectively.
    """

    # Fix: gymnasium's conventional metadata key is "render_modes"
    # (the legacy gym key was "render.modes").
    metadata = {"render_modes": ["human"], "render_fps": 60}

    def __init__(self, use_render: bool = True, seed: int = None):
        super().__init__()
        self.obstacles = OBSTACLES
        self.robot = DiffDriveRobot(200, 150, math.pi / 4)
        self.target = np.array([400.0, 300.0])
        self.num_rays = NUM_RAYS
        self.ray_length = RAY_LENGTH
        self.ray_step = RAY_STEP
        self.max_wheel_speed = MAX_WHEEL_SPEED
        self.max_omega_speed = MAX_OMEGA_SPEED
        self.dt = DT
        self.step_count = 0
        self.max_episode_steps = MAX_EPISODE_STEPS
        self.use_render = use_render

        # Pygame surface (lazy init when use_render is False).
        self.surface = None
        if self.use_render:
            pygame.init()
            self.surface = pygame.display.set_mode((MAP_WIDTH, MAP_HEIGHT))
            pygame.display.set_caption("Top-Down Raycasting Robot Simulation (RL Env)")

        # Observation: NUM_RAYS normalized distances plus
        # [dx, dy, dist_norm, vel_alignment, omega, theta_norm, v_norm].
        obs_len = self.num_rays + 7
        self.observation_space = spaces.Box(low=-1.0, high=1.0, shape=(obs_len,), dtype=np.float32)

        # Action: continuous [forward, turn] commands in [-1, 1].
        self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)

        # RNG
        self.seed(seed)

    def seed(self, s: int = None):
        """Seed both the gymnasium RNG and python's `random` module
        (the helpers below use `random` directly)."""
        self.np_random, seed = gym.utils.seeding.np_random(s)
        random.seed(seed)
        return [seed]

    # ---------- Helper methods ----------
    def _random_free_position(self) -> Tuple[float, float]:
        """Rejection-sample a point outside every obstacle rectangle.

        NOTE(review): only the center point is tested, so a sampled pose may
        still overlap an obstacle by up to ROBOT_RADIUS — confirm whether
        spawn-in-contact is acceptable.
        """
        for _ in range(1000):
            x = random.uniform(ROBOT_RADIUS, MAP_WIDTH - ROBOT_RADIUS)
            y = random.uniform(ROBOT_RADIUS, MAP_HEIGHT - ROBOT_RADIUS)
            if not any(rect.collidepoint(x, y) for rect in self.obstacles):
                return x, y
        # fallback if sampling keeps failing
        return 30.0, 30.0

    def _robot_collision(self) -> bool:
        """Circle-vs-rect collision for the robot body; leaving the map
        bounds is also treated as a collision."""
        for rect in self.obstacles:
            closest_x = np.clip(self.robot.x, rect.left, rect.right)
            closest_y = np.clip(self.robot.y, rect.top, rect.bottom)
            dx = self.robot.x - closest_x
            dy = self.robot.y - closest_y
            if dx * dx + dy * dy <= (ROBOT_RADIUS ** 2):
                return True
        if not (0 <= self.robot.x <= MAP_WIDTH and 0 <= self.robot.y <= MAP_HEIGHT):
            return True
        return False

    def ray_cast(self):
        """March num_rays rays outward from the robot.

        Returns (hits, hits_norm): per-ray hit distance in pixels and the
        same values divided by ray_length.

        Fix: the original inner `break` only exited the obstacle `for` loop,
        so the ray kept marching and ended up reporting the obstacle's *far*
        edge; we now stop at the first (nearest) hit.
        """
        hits = []
        for i in range(self.num_rays):
            angle = self.robot.theta + (i / self.num_rays) * 2 * math.pi
            cos_a = math.cos(angle)
            sin_a = math.sin(angle)
            hit_distance = self.ray_length
            d = 0
            while d < self.ray_length:
                x = self.robot.x + d * cos_a
                y = self.robot.y + d * sin_a
                # map boundary counts as a hit
                if x < 0 or x > MAP_WIDTH or y < 0 or y > MAP_HEIGHT:
                    hit_distance = d
                    break
                if any(rect.collidepoint(int(x), int(y)) for rect in self.obstacles):
                    hit_distance = d
                    break
                d += self.ray_step
            hits.append(hit_distance)
        hits = np.array(hits, dtype=np.float32)
        norm = hits / float(self.ray_length)
        return hits, norm

    def _velocity_alignment(self) -> float:
        """Cosine between the robot's velocity vector and the direction to
        the target: 1 when driving straight at it, -1 when driving straight
        away or (by convention) when the product of magnitudes is ~0.

        Extracted to remove the verbatim duplication that existed between
        _get_obs() and _compute_reward().
        """
        robot_vel = np.array([self.robot.v * np.cos(self.robot.theta),
                              self.robot.v * np.sin(self.robot.theta)])
        relative_position = self.target - np.array([self.robot.x, self.robot.y])
        denom = np.linalg.norm(robot_vel) * np.linalg.norm(relative_position)
        if denom < 1e-3:
            return -1.0
        return float(np.dot(robot_vel, relative_position) / denom)

    def _get_obs(self):
        """Build the observation vector.

        Returns (obs, hits_norm); the normalized lidar readings are returned
        separately so step() can reuse them for the safety reward without a
        second ray cast.
        """
        hits, hits_norm = self.ray_cast()
        # target position relative to the robot, normalized by map size
        dx = (self.target[0] - self.robot.x) / MAP_WIDTH
        dy = (self.target[1] - self.robot.y) / MAP_HEIGHT
        dist = math.hypot(self.target[0] - self.robot.x, self.target[1] - self.robot.y)
        max_diag = math.hypot(MAP_WIDTH, MAP_HEIGHT)
        dist_norm = dist / max_diag  # 0..1
        v_norm = self.robot.v / self.max_wheel_speed
        robot_theta_norm = self.robot.theta / (np.pi * 2.0)
        vel_alignment = self._velocity_alignment()

        obs = np.concatenate([
            hits_norm,  # NUM_RAYS values
            np.array([dx, dy, dist_norm, vel_alignment, self.robot.omega,
                      robot_theta_norm, v_norm], dtype=np.float32)
        ])
        return obs, hits_norm

    def _check_collision(self):
        """Legacy point-only collision test (ignores the robot radius).

        Unused by step(), which relies on _robot_collision(); kept for
        backward compatibility with external callers.
        """
        for rect in self.obstacles:
            if rect.collidepoint(self.robot.x, self.robot.y):
                return True
        return False

    def _compute_reward(self, lidar_norm) -> float:
        """Weighted sum of goal-distance, velocity-alignment, static-safety
        and smoothness terms."""
        # a. static-obstacle safety: mean log of normalized ray distances,
        #    strongly negative when any ray is short
        eps = 1.0 / self.ray_length
        reward_safety_static = np.log(lidar_norm + eps).mean()

        # b. (reserved: safety term for dynamic obstacles)
        # c. velocity alignment toward the goal, in [-1, 1]
        reward_vel = self._velocity_alignment()

        # d. smoothness: penalize the change of (v, theta) since last step
        penalty_smooth = -np.linalg.norm(
            np.array([self.robot.v, self.robot.theta])
            - np.array([self.robot.old_v, self.robot.old_theta]))  # norm > 0

        # e. negative distance to the goal, normalized by the map diagonal
        reward_goal = -np.linalg.norm(self.target - np.array([self.robot.x, self.robot.y])) \
            / np.linalg.norm(np.array([MAP_WIDTH, MAP_HEIGHT]))

        reward = 5.0 * reward_goal + reward_vel + reward_safety_static + 0.1 * penalty_smooth
        return reward

    # ---------- Gym API ----------
    def reset(self, *, seed: int = None, options=None):
        """Randomize robot pose and target (both in free space); return
        (observation, info) per the gymnasium API."""
        if seed is not None:
            self.seed(seed)
        rx, ry = self._random_free_position()
        rtheta = random.uniform(-math.pi, math.pi)
        self.robot.reset(rx, ry, rtheta)

        tx, ty = self._random_free_position()
        self.target = np.array([tx, ty], dtype=np.float32)

        self.step_count = 0
        obs, _ = self._get_obs()
        return obs, {}

    def step(self, action):
        """Apply the action for one dt and return
        (obs, reward, terminated, truncated, info).

        Fix: reaching the step limit is now reported as `truncated=True`
        (gymnasium semantics) instead of being folded into `terminated`;
        terminated remains collision-or-goal only.
        """
        # map the clipped action to linear and angular speed commands
        action = np.clip(np.array(action, dtype=np.float32), -1.0, 1.0)
        self.robot.set_speeds(action[0] * self.max_wheel_speed,
                              action[1] * self.max_omega_speed)

        # simulate a single step (could be subdivided for stability)
        self.robot.step(self.dt)
        self.step_count += 1

        collided = self._robot_collision()
        curr_dist = math.hypot(self.target[0] - self.robot.x, self.target[1] - self.robot.y)
        reached = curr_dist < (ROBOT_RADIUS + 5.0)  # small threshold

        obs, hits_norm = self._get_obs()
        reward = self._compute_reward(hits_norm)

        terminated = bool(collided or reached)
        truncated = bool(self.step_count >= self.max_episode_steps)

        info = {
            "is_success": reached,
            "collided": collided,
            "step_count": self.step_count,
            "distance": curr_dist
        }
        return obs, float(reward), terminated, truncated, info

    def render(self, mode="human"):
        """Draw obstacles, rays, robot, heading and target; no-op when
        use_render is False."""
        if not self.use_render:
            return
        if self.surface is None:
            pygame.init()
            self.surface = pygame.display.set_mode((MAP_WIDTH, MAP_HEIGHT))
        self.surface.fill((255, 255, 255))
        # obstacles
        for rect in self.obstacles:
            pygame.draw.rect(self.surface, (80, 80, 80), rect)
        # rays
        hits, _ = self.ray_cast()
        for i, d in enumerate(hits):
            angle = self.robot.theta + (i / self.num_rays) * 2 * math.pi
            x_end = self.robot.x + d * math.cos(angle)
            y_end = self.robot.y + d * math.sin(angle)
            pygame.draw.line(self.surface, (255, 0, 0),
                             (int(self.robot.x), int(self.robot.y)),
                             (int(x_end), int(y_end)), 1)
        # robot body and heading indicator
        pygame.draw.circle(self.surface, (0, 150, 255),
                           (int(self.robot.x), int(self.robot.y)), ROBOT_RADIUS)
        hx = self.robot.x + ROBOT_RADIUS * math.cos(self.robot.theta)
        hy = self.robot.y + ROBOT_RADIUS * math.sin(self.robot.theta)
        pygame.draw.line(self.surface, (30, 30, 30),
                         (int(self.robot.x), int(self.robot.y)),
                         (int(hx), int(hy)), 2)
        # target
        pygame.draw.circle(self.surface, (255, 0, 0),
                           (int(self.target[0]), int(self.target[1])), 6)
        pygame.display.flip()
        # handle quit events to keep the window responsive
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.close()
                sys.exit(0)

    def close(self):
        """Tear down the pygame display if one was created."""
        if self.surface:
            pygame.display.quit()
            pygame.quit()
            self.surface = None

# ========= Test / Demo =========
if __name__ == "__main__":
    # The env initializes pygame itself when use_render=True, so the
    # redundant top-level pygame.init() was removed; the dead `done = False`
    # local was removed as well.
    env = TopDownRaycastEnv(use_render=True)
    obs, _ = env.reset()
    print("obs shape:", obs.shape)
    while True:
        # simple random policy for demo
        action = env.action_space.sample()
        obs, reward, terminated, truncated, info = env.step(action)
        env.render()
        print(f"r={reward:.2f}, dist={info['distance']:.1f}, collided={info['collided']}")
        if terminated or truncated:
            obs, _ = env.reset()
            break
    env.close()  # shuts pygame down cleanly
    sys.exit()

