#!/usr/bin/env python3

"""
AI2-THOR环境适配器
将Neural-SLAM与AI2-THOR环境集成
"""

import numpy as np
import torch
import gymnasium as gym
from gymnasium import spaces
import ai2thor
from ai2thor.controller import Controller
import cv2
import math

class AI2ThorNeuralSLAMEnv(gym.Env):
    """AI2-THOR environment adapter exposing a Neural-SLAM compatible interface.

    Wraps an ``ai2thor`` Controller as a gymnasium ``Env`` with:
      * a discrete action space (0 = move ahead, 1 = rotate left, 2 = rotate right),
      * a 4-channel uint8 CHW observation (RGB + depth),
      * an ``info['sensor_pose']`` entry ``[x, z, yaw]`` as Neural-SLAM expects.
    """

    def __init__(self, 
                 scene_name="FloorPlan1_physics",
                 frame_width=128,
                 frame_height=128,
                 max_episode_length=1000,
                 camera_height=1.25,
                 hfov=90.0):
        """Create the AI2-THOR controller and allocate the internal maps.

        Args:
            scene_name: AI2-THOR scene to load.
            frame_width: width observations are resized to.
            frame_height: height observations are resized to.
            max_episode_length: step count after which the episode is truncated.
            camera_height: stored for callers; not forwarded to the controller here.
            hfov: horizontal field of view in degrees; stored only.
        """
        super().__init__()

        self.scene_name = scene_name
        self.frame_width = frame_width
        self.frame_height = frame_height
        self.max_episode_length = max_episode_length
        self.camera_height = camera_height
        self.hfov = hfov

        # The controller renders at 640x480; frames are resized down to
        # (frame_width, frame_height) in _get_observation().
        self.controller = Controller(
            scene=scene_name,
            gridSize=0.25,
            width=640,
            height=480,
            renderDepthImage=True,
            renderInstanceSegmentation=True
        )

        # Actions: 0 = MoveAhead, 1 = RotateLeft, 2 = RotateRight.
        self.action_space = spaces.Discrete(3)

        # Observation: 3 RGB channels + 1 depth channel, CHW layout.
        self.observation_space = spaces.Box(
            low=0, high=255,
            shape=(4, frame_height, frame_width),  # RGB + Depth
            dtype=np.uint8
        )

        # Episode state.
        self.episode_length = 0
        self.agent_position = None   # AI2-THOR {'x','y','z'} dict after reset()
        self.agent_rotation = None   # AI2-THOR {'x','y','z'} dict after reset()
        self.visited_positions = set()
        self.total_explored_area = 0

        # Internal 2-D maps: 200 cells x 0.1 m/cell = 20 m x 20 m, centered
        # on the world origin.
        self.map_size = 200
        self.map_resolution = 0.1
        self.occupancy_map = np.zeros((self.map_size, self.map_size), dtype=np.float32)
        self.explored_map = np.zeros((self.map_size, self.map_size), dtype=np.float32)

    def reset(self, seed=None, options=None):
        """Reset the scene and all episode state; return (observation, info)."""
        super().reset(seed=seed)  # seeds self.np_random for reproducibility

        self.controller.reset(scene=self.scene_name)

        self.episode_length = 0
        self.visited_positions = set()
        self.total_explored_area = 0

        self.occupancy_map.fill(0)
        self.explored_map.fill(0)

        # Issue a benign query action so the returned event carries fresh
        # agent metadata to read the pose from.
        event = self.controller.step(action="GetReachablePositions")
        self.agent_position = event.metadata['agent']['position']
        self.agent_rotation = event.metadata['agent']['rotation']

        self._update_maps()
        obs = self._get_observation()

        info = {
            'agent_position': self.agent_position,
            'agent_rotation': self.agent_rotation,
            'episode_length': self.episode_length,
            'explored_area': self.total_explored_area,
            'sensor_pose': self._sensor_pose()  # pose info Neural-SLAM needs
        }

        return obs, info

    def step(self, action):
        """Execute one discrete action.

        Returns the gymnasium 5-tuple ``(obs, reward, terminated, truncated,
        info)``. Reaching ``max_episode_length`` is reported as *truncation*
        (not termination), per the gymnasium time-limit convention; there is
        no terminal success condition, so ``terminated`` is always False.

        Raises:
            ValueError: if ``action`` is not 0, 1 or 2.
        """
        if action == 0:  # move ahead
            event = self.controller.step(action="MoveAhead")
        elif action == 1:  # rotate left
            event = self.controller.step(action="RotateLeft", degrees=10)
        elif action == 2:  # rotate right
            event = self.controller.step(action="RotateRight", degrees=10)
        else:
            raise ValueError(f"Invalid action: {action}")

        self.episode_length += 1
        self.agent_position = event.metadata['agent']['position']
        self.agent_rotation = event.metadata['agent']['rotation']

        self._update_maps()
        reward = self._calculate_reward()

        # Time-limit end is truncation, not termination (gymnasium API).
        truncated = self.episode_length >= self.max_episode_length

        obs = self._get_observation()

        info = {
            'agent_position': self.agent_position,
            'agent_rotation': self.agent_rotation,
            'episode_length': self.episode_length,
            'explored_area': self.total_explored_area,
            'success': event.metadata['lastActionSuccess'],
            # Only a failed MoveAhead counts as a collision; failed rotations
            # are not collisions.
            'collision': not event.metadata['lastActionSuccess'] and action == 0,
            'sensor_pose': self._sensor_pose()  # pose info Neural-SLAM needs
        }

        return obs, reward, False, truncated, info

    def _sensor_pose(self):
        """Current pose as ``[x, z, yaw]`` — Neural-SLAM's (x, y, orientation).

        AI2-THOR is y-up, so the ground plane is (x, z) and heading is the
        rotation about the y axis, in degrees.
        """
        return [
            self.agent_position['x'],
            self.agent_position['z'],
            self.agent_rotation['y']
        ]

    def _get_observation(self):
        """Return a (4, H, W) uint8 array: RGB channels plus raw depth.

        Depth comes from AI2-THOR as floats (presumably meters — confirm
        against the controller config). It is clipped to [0, 255] before the
        uint8 cast so large readings cannot wrap around modulo 256; values
        remain coarsely quantized — rescale upstream if finer depth matters.
        """
        rgb_frame = self.controller.last_event.frame
        rgb_frame = cv2.resize(rgb_frame, (self.frame_width, self.frame_height))
        rgb_frame = rgb_frame.transpose(2, 0, 1)  # HWC -> CHW

        depth_frame = self.controller.last_event.depth_frame
        depth_frame = cv2.resize(depth_frame, (self.frame_width, self.frame_height))
        depth_frame = np.clip(depth_frame, 0, 255)  # prevent uint8 wraparound
        depth_frame = np.expand_dims(depth_frame, axis=0)  # add channel dimension

        # Stack RGB + depth, matching the original Neural-SLAM layout.
        obs = np.concatenate([rgb_frame, depth_frame], axis=0)

        return obs.astype(np.uint8)

    def _update_maps(self):
        """Mark the agent's current grid cell as explored.

        World (x, z) maps to grid indices with the world origin at the map
        center. ``occupancy_map`` is allocated but not populated here.
        """
        map_x = int(self.agent_position['x'] / self.map_resolution + self.map_size // 2)
        map_z = int(self.agent_position['z'] / self.map_resolution + self.map_size // 2)

        # Keep indices inside the map bounds.
        map_x = np.clip(map_x, 0, self.map_size - 1)
        map_z = np.clip(map_z, 0, self.map_size - 1)

        if (map_x, map_z) not in self.visited_positions:
            self.visited_positions.add((map_x, map_z))
            self.explored_map[map_x, map_z] = 1.0
            # Each newly visited cell contributes resolution^2 square meters.
            self.total_explored_area += self.map_resolution ** 2

    def _calculate_reward(self):
        """Compute the step reward.

        Exploration bonus (per visited cell) + a constant movement bonus,
        minus a penalty when the last controller action failed.
        """
        exploration_reward = len(self.visited_positions) * 0.01
        movement_reward = 0.1  # constant shaping term encouraging motion
        collision_penalty = -0.5 if not self.controller.last_event.metadata['lastActionSuccess'] else 0

        return exploration_reward + movement_reward + collision_penalty

    def get_short_term_goal(self, inputs):
        """Return a placeholder short-term goal (Neural-SLAM interface).

        Simplified implementation: samples a random (angle-bin, distance)
        pair. Uses the gymnasium-seeded ``self.np_random`` so results are
        reproducible under ``reset(seed=...)`` (the global ``np.random``
        would ignore the seed).
        """
        angle = self.np_random.integers(0, 72)     # heading bin, 5-degree steps
        distance = self.np_random.integers(1, 25)  # 1-24 cells

        return np.array([[angle, distance, 0]], dtype=np.float32)  # trailing 0 is a dummy action

    def get_rewards(self, inputs):
        """Return the explored area as the reward (Neural-SLAM interface)."""
        return np.array([self.total_explored_area], dtype=np.float32)

    def close(self):
        """Stop the AI2-THOR controller if it was created."""
        if hasattr(self, 'controller'):
            self.controller.stop()

class AI2ThorVecEnv:
    """Run several AI2-THOR environments in lockstep behind one interface."""

    # Scenes handed out round-robin to the worker environments.
    _SCENES = (
        "FloorPlan1_physics",
        "FloorPlan2_physics",
        "FloorPlan3_physics",
        "FloorPlan4_physics",
    )

    def __init__(self, num_envs=4, **kwargs):
        """Create ``num_envs`` environments, cycling through the scene list."""
        self.num_envs = num_envs
        self.envs = [
            AI2ThorNeuralSLAMEnv(
                scene_name=self._SCENES[idx % len(self._SCENES)], **kwargs
            )
            for idx in range(num_envs)
        ]

        # Mirror the spaces of the first worker.
        self.observation_space = self.envs[0].observation_space
        self.action_space = self.envs[0].action_space

    def reset(self):
        """Reset every environment; return (stacked observations, info list)."""
        results = [env.reset() for env in self.envs]
        obs_batch = [obs for obs, _ in results]
        info_batch = [info for _, info in results]
        return np.stack(obs_batch), info_batch

    def step(self, actions):
        """Step each environment with its action.

        Returns (stacked obs, reward array, done array, info list); an
        environment counts as done when it terminated OR was truncated.
        """
        obs_batch, reward_batch, done_batch, info_batch = [], [], [], []

        for env, act in zip(self.envs, actions):
            obs, reward, terminated, truncated, info = env.step(act)
            obs_batch.append(obs)
            reward_batch.append(reward)
            done_batch.append(terminated or truncated)
            info_batch.append(info)

        return (np.stack(obs_batch),
                np.array(reward_batch),
                np.array(done_batch),
                info_batch)

    def get_short_term_goal(self, inputs):
        """Collect each worker's short-term goal (simplified: all see inputs[0])."""
        return np.stack([env.get_short_term_goal(inputs[0]) for env in self.envs])

    def get_rewards(self, inputs):
        """Collect each worker's reward (simplified: all see inputs[0])."""
        return np.stack([env.get_rewards(inputs[0]) for env in self.envs])

    def close(self):
        """Close every worker environment."""
        for env in self.envs:
            env.close()

def make_ai2thor_envs(args):
    """Build the vectorized AI2-THOR environment from parsed CLI arguments.

    *args* must provide: ``num_processes``, ``frame_width``,
    ``frame_height``, ``max_episode_length``, ``camera_height``, ``hfov``.
    """
    env_kwargs = dict(
        frame_width=args.frame_width,
        frame_height=args.frame_height,
        max_episode_length=args.max_episode_length,
        camera_height=args.camera_height,
        hfov=args.hfov,
    )
    return AI2ThorVecEnv(num_envs=args.num_processes, **env_kwargs)
