import numpy as np
import gymnasium as gym
import torch
import pygame

# Large finite value used as an effectively unbounded Box limit.
INFINITY = 1e6
# Way-point sequence (x, y) in grid units; render() draws all of them,
# but reset() only targets the first one.
ALL_GOAL = [(2,2),(4,2),(6,0),(8,-2),(10,0)]

# dtypes used for the gym spaces and the internal state arrays
Int = np.int32
Float = np.float64

import os, sys
# Make the package parent importable so `feature_extract` resolves when this
# file is executed directly rather than as part of an installed package.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from feature_extract.model_history import try_get_model
# Length of the (pose, stimulation) window fed to the recurrent model.
CONST_INT_SEQ_LEN = 5
class CockroachEnv(gym.Env):
    """Goal-seeking environment for a (simulated) cockroach.

    The agent chooses left/right stimulation frequencies (the action) and the
    cockroach moves according to dynamics supplied by a subclass through
    `_step` / `_reset`.  The observation is the vector from the cockroach to
    the current goal plus the heading angle in radians.

    NOTE(review): `reset` returns only the observation and `step` folds the
    200-step cut-off (truncation) into `done`, which deviates from the modern
    gymnasium API; callers rely on these shapes, so they are kept as-is.
    """

    # A goal counts as reached when the cockroach is within this distance.
    tolerance = 0.2

    def __init__(self, render_mode='human'):
        # Action: (left stimulation freq, right stimulation freq) in [10, 60].
        self.action_space = gym.spaces.Box(10, 60, shape=(2,), dtype=Int)
        # Observation: ((dx, dy) to the goal, heading theta in [-pi, pi]).
        self.observation_space = gym.spaces.Tuple((gym.spaces.Box(
            -INFINITY * np.ones(2),
            INFINITY * np.ones(2),
            dtype=Float
        ),
        gym.spaces.Box(-np.pi, np.pi, shape=(1,), dtype=Float)))

        # State is empty until `reset()` fills it in.
        self.coords = None
        self.theta = None
        self.goal_coords = None
        self.goal_achieved = 0
        self.steps = 0
        self.pose_stim_history = None
        self.current_action = None
        self.total_reward = 0
        self.reward = 0
        self.done = None
        # Populate the state immediately.
        self.reset()

        if render_mode == 'human':
            # Pygame initialisation (only needed for on-screen rendering).
            pygame.init()
            self.screen_width = 1200
            self.screen_height = 800
            self.screen = pygame.display.set_mode((self.screen_width, self.screen_height))
            pygame.display.set_caption("Cockroach Environment")
            # NOTE(review): hard-coded absolute asset path -- breaks on any
            # other machine; consider deriving it from __file__.
            self.cockroach_img = pygame.image.load(r"E:\25spring\FYP\image\cockroach.png")
            self.cockroach_img = pygame.transform.scale(self.cockroach_img, (60, 60))  # 60 px sprite = 3 cm
            self.GRID_COLOR = (200, 200, 200)
            self.GOAL_COLOR = (255, 0, 0)
            self.CURRENT_GOAL_COLOR = (0, 255, 0)
            self.TEXT_COLOR = (0, 0, 0)
            # Fixed world->screen offset: the world origin sits at the screen centre.
            self.camera_offset = [self.screen_width // 2, self.screen_height // 2]
            self.font = pygame.font.Font(None, 24)
        self.render_mode = render_mode

    def reset(self, seed=0):
        """Reset the episode state and return the initial observation.

        Returns:
            ((dx, dy), theta): vector to the goal and the heading angle.
        """
        self.goal_coords = np.array([2, 2], dtype=Float)  # track only the first way-point
        self.goal_achieved = 0
        self.steps = 0
        self.coords = np.array([0, 0], dtype=Float)  # start at the origin
        self.theta = np.array(0, dtype=Float)  # initial heading
        self.total_reward = 0
        self.reward = 0
        self.done = False
        self._reset(seed)
        return self.goal_coords - self.coords, self.theta

    def _reset(self, seed=0):
        """Hook for subclasses to reset their own state."""
        pass

    def step(self, action):
        """Apply one action.

        Returns:
            (obs, reward, done, truncated, info) where obs is
            ((dx, dy), theta) and info carries the step counter.
        """
        self.steps += 1
        self.current_action = action
        self._step(action)  # subclass dynamics update self.coords / self.theta
        reward = 0
        dist = ((self.goal_coords - self.coords) ** 2).sum() ** 0.5

        # Dense shaping reward: positive when the distance to the goal shrank.
        # The pre-step position is approximated by undoing the movement this
        # action is estimated to cause.
        prev_dist = ((self.goal_coords - (self.coords - self._get_movement(action))) ** 2).sum() ** 0.5
        dist_reward = prev_dist - dist
        reward += dist_reward * 5.0  # weight on the distance-shaping term

        # Reaching the goal ends the episode with a large bonus.
        if dist < self.tolerance:
            reward += 100
            self.done = True

        # Hard step limit: episode ends after 200 steps (truncation folded
        # into `done` -- see class docstring).
        if self.steps >= 200:
            self.done = True

        self.reward = reward
        # Fix: accumulate the running total -- render() displays it, but it
        # was never updated before, so the HUD always showed 0.
        self.total_reward += reward
        return (self.goal_coords - self.coords, self.theta), reward, self.done, False, {"steps": self.steps}

    def _step(self, action):
        """Hook for subclasses: update self.coords / self.theta for one action."""
        pass

    def _get_movement(self, action):
        """Estimate the (dx, dy) displacement one action produces.

        NOTE(review): `lfreq or rfreq` / `-lfreq or rfreq` take the left value
        when it is non-zero and fall back to the right one -- presumably only
        one side is stimulated at a time; confirm against the stimulation
        protocol.
        """
        k1, k2 = 0.01, 0.5 * np.pi / 180  # gains: freq -> speed, freq -> turn rate
        lfreq, rfreq = action
        v = k1 * (lfreq or rfreq)
        omega = k2 * (-lfreq or rfreq)
        theta = self.theta + omega * 0.5
        return v * np.array([np.sin(theta), np.cos(theta)]) * 0.5

    def render(self):
        """Draw the grid, goals, cockroach sprite and HUD text with pygame."""
        self.screen.fill((255, 255, 255))  # clear screen

        grid_size = 40  # 1 cm = 40 px (smaller grid cells -> larger visible area)

        # Draw grid
        for x in range(0, self.screen_width, grid_size):
            pygame.draw.line(self.screen, self.GRID_COLOR, (x, 0), (x, self.screen_height))
        for y in range(0, self.screen_height, grid_size):
            pygame.draw.line(self.screen, self.GRID_COLOR, (0, y), (self.screen_width, y))

        # Draw coordinate axes through the world origin
        pygame.draw.line(self.screen, (0, 0, 0), (0, self.camera_offset[1]), (self.screen_width, self.camera_offset[1]), 2)
        pygame.draw.line(self.screen, (0, 0, 0), (self.camera_offset[0], 0), (self.camera_offset[0], self.screen_height), 2)

        # Draw every way-point
        for goal in ALL_GOAL:
            goal_pos = (goal[0] * grid_size + self.camera_offset[0],
                        goal[1] * grid_size + self.camera_offset[1])
            pygame.draw.circle(self.screen, self.GOAL_COLOR, goal_pos, 5)

        # Highlight the goal currently being chased
        current_goal_pos = (self.goal_coords[0] * grid_size + self.camera_offset[0],
                            self.goal_coords[1] * grid_size + self.camera_offset[1])
        pygame.draw.circle(self.screen, self.CURRENT_GOAL_COLOR, current_goal_pos, 8)

        # Draw cockroach
        if self.coords is not None:
            # Rotate the sprite to the current heading (offset by 90 degrees
            # to match the sprite art's orientation).
            rotated_cockroach = pygame.transform.rotate(self.cockroach_img, self.theta * 180 / np.pi - 90)
            cockroach_rect = rotated_cockroach.get_rect()
            # Convert world coordinates to screen coordinates
            pos = (self.coords[0] * grid_size + self.camera_offset[0],
                   self.coords[1] * grid_size + self.camera_offset[1])
            # Centre the sprite at its position
            cockroach_rect.center = pos
            self.screen.blit(rotated_cockroach, cockroach_rect)

        # HUD: observation, accumulated reward and the most recent action
        dx, dy = self.goal_coords[0] - self.coords[0], self.goal_coords[1] - self.coords[1]
        observation_text = f"dx: {dx:.2f}, dy: {dy:.2f}"
        reward_text = f"Total Reward: {self.total_reward}"

        if self.current_action is not None:
            lfreq, rfreq = self.current_action
            action_text = f"Action: L={lfreq}, R={rfreq}"
        else:
            action_text = "Action: None"

        observation_surface = self.font.render(observation_text, True, self.TEXT_COLOR)
        reward_surface = self.font.render(reward_text, True, self.TEXT_COLOR)
        action_surface = self.font.render(action_text, True, self.TEXT_COLOR)

        self.screen.blit(observation_surface, (10, 10))
        self.screen.blit(reward_surface, (10, 40))
        # Action read-out goes in the bottom-left corner
        self.screen.blit(action_surface, (10, self.screen_height - 30))

        # Update display
        pygame.display.flip()
        # Throttle rendering to ~2 FPS (one frame every 0.5 s)
        pygame.time.Clock().tick(2)

        # Handle pygame events so the window stays responsive
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return

class RNNCockroachEnv(CockroachEnv):
    """CockroachEnv whose dynamics are predicted by a trained recurrent model.

    model_cfg is a 3-tuple:
      [0] coordinate unit the model was trained in: 'pxNNN' (pixels, NNN px
          per env unit), 'm' (metres, env unit assumed to be cm), anything
          else means no rescaling;
      [1] unused here -- presumably kept for compatibility with the training
          config (TODO confirm);
      [2] 'norm' to normalise each input window relative to its first pose.
    """

    def __init__(self, model_pth=None, model_cfg=('cm', None, None), render_mode='human'):
        # Fix: the old default `('cm')` was a plain string (missing comma),
        # so `self.model_cfg[2]` raised IndexError inside `_step`; a proper
        # 3-tuple keeps indexing valid while preserving "no rescale, no norm".
        super().__init__(render_mode)
        if model_pth is None:
            model_pth = r"E:\25spring\FYP\pymodules\deep-models\lstm-20250518164729\best_4231.842625.pth"
        self.cockroach_model = try_get_model(model_pth)
        self.model_cfg = model_cfg

    def _reset(self, seed=0):
        # History window of the last SEQ_LEN-1 rows of (x, y, theta, lstim, rstim).
        self.pose_stim_history = torch.zeros((CONST_INT_SEQ_LEN-1, 5))

    def _step(self, action):
        npa = np.array([*self.coords, self.theta, *action])
        # Convert env coordinates into the unit the model was trained on.
        if self.model_cfg[0].startswith('px'):
            ratio = float(self.model_cfg[0][2:])
            npa[0] *= ratio
            npa[1] *= ratio
        elif self.model_cfg[0] == "m":
            npa[0] /= 100
            npa[1] /= 100
        step_tensor = torch.Tensor(npa)
        input_tensor = torch.vstack((self.pose_stim_history, step_tensor))
        original = None
        if self.model_cfg[2] == "norm":
            # Fix: clone the anchor pose. The old code kept a *view* into
            # input_tensor, which the in-place subtraction below zeroed out,
            # so the de-normalisation after the model call added 0 instead
            # of the saved offset.
            original = input_tensor[0, :2].clone()
            input_tensor[:, :2] = input_tensor[:, :2] - original
        with torch.no_grad():
            output_tensor = self.cockroach_model(input_tensor.unsqueeze(0))
        output_tensor = output_tensor[0]
        if self.model_cfg[2] == "norm":
            # Shift the predicted position back into the un-normalised frame.
            output_tensor[:2] = original + output_tensor[:2]
        x, y, theta = output_tensor.numpy()
        # Convert the prediction back into env coordinates.
        if self.model_cfg[0].startswith('px'):
            ratio = float(self.model_cfg[0][2:])
            x /= ratio
            y /= ratio
        elif self.model_cfg[0] == "m":
            x *= 100
            y *= 100
        self.coords = np.array([x, y], dtype=Float)
        self.theta = np.array(theta, dtype=Float)
        # Slide the window: drop the oldest row, keep the newest SEQ_LEN-1.
        # NOTE(review): under 'norm' these stored rows are in the normalised
        # frame while the next raw pose appended above is not -- verify this
        # matches how the training windows were built.
        self.pose_stim_history = input_tensor[1:, :]

def get_rnn_env(render_mode=None):
    """Build the RNN-backed environment from the pixel-normalised LSTM checkpoint."""
    checkpoint = r"E:\25spring\FYP\pymodules\deep-models\lstm-20250527124919\best_2806.555670.pth"
    config = ('px100', None, "norm")
    return RNNCockroachEnv(model_pth=checkpoint, model_cfg=config, render_mode=render_mode)

class LinearCockroachEnv(CockroachEnv):
    """Cockroach environment with simple closed-form (linear) kinematics."""

    # Gains: stimulation frequency -> forward speed, and -> turn rate (radians).
    k1 = 0.01
    k2 = 0.5 * np.pi / 180

    def __init__(self, render_mode='human'):
        super().__init__(render_mode)

    def _step(self, action):
        """Integrate the pose analytically over one half-second time step."""
        left, right = action
        # `left or right` / `-left or right`: the left frequency wins when it
        # is non-zero, otherwise the right one is used -- presumably a
        # one-side-at-a-time stimulation protocol (mirrors the base class's
        # `_get_movement`).
        speed = self.k1 * (left or right)
        turn_rate = self.k2 * (-left or right)
        new_theta = self.theta + turn_rate * 0.5
        heading = np.array([np.sin(new_theta), np.cos(new_theta)])
        self.coords += speed * heading * 0.5
        self.theta = new_theta

