﻿import gymnasium as gym
import time

import numpy as np

# Mapping from FrozenLake action index to a human-readable name.
action_names = {
    0: "左(LEFT)",
    1: "下(DOWN)",
    2: "右(RIGHT)",
    3: "上(UP)",
}


class FrozenLakeEnv:
    """Wrapper around Gymnasium's FrozenLake-v1 with custom reward shaping.

    Adds large penalties for falling into a hole (-20000) or bumping into a
    wall (-10000), a large bonus (+10000) for reaching the goal, and
    annotates ``info`` with boolean flags describing what happened on each
    step.
    """

    def __init__(self):
        # Deterministic (non-slippery) lake, rendered in a window for a
        # human observer.
        self.env = gym.make("FrozenLake-v1", render_mode="human", is_slippery=False)
        self.env.reset()

    def _grid_coords(self, state):
        """Convert a flat state index into ``(row, col)`` grid coordinates."""
        ncol = self.env.unwrapped.ncol
        return state // ncol, state % ncol

    def step(self, action):
        """Execute *action* and return ``(obs, reward, done, truncated, info)``.

        The reward is reshaped relative to the base environment:
        -20000 for falling into a hole, -10000 for bumping a wall,
        +10000 for reaching the goal. ``info`` gains the boolean keys
        ``fell_into_hole``, ``hit_wall`` and ``arrive at the destination``.
        """
        # State before the move, used to detect "did not move" (wall bump).
        prev_state = self.env.unwrapped.s
        desc = self.env.unwrapped.desc  # 2-D map layout

        obs, reward, done, truncated, info = self.env.step(action)

        current_state = self.env.unwrapped.s
        row, col = self._grid_coords(current_state)
        tile = desc[row][col]  # NOTE: map cells are byte strings (b"H", b"G", ...)

        # Falling into a hole: heavy penalty (base env gives reward 0 here).
        if tile == b"H":
            reward -= 20000
            done = True  # already terminal in the base env; made explicit
            info["fell_into_hole"] = True
        else:
            info["fell_into_hole"] = False

        # Wall bump: the agent did not move and is not standing on the goal.
        if current_state == prev_state and tile != b"G":
            reward -= 10000
            info["hit_wall"] = True
        else:
            info["hit_wall"] = False

        # Reaching the goal earns a large bonus on top of the base reward.
        if tile == b"G":
            reward += 10000
            info["arrive at the destination"] = True
        else:
            info["arrive at the destination"] = False

        return obs, reward, done, truncated, info

    def render(self):
        """Delegate rendering to the wrapped environment."""
        return self.env.render()

    def close(self):
        """Close the wrapped environment and release its resources."""
        return self.env.close()

    def get_current_position(self):
        """Return the agent's position as grid coordinates and state index."""
        state = self.env.unwrapped.s
        row, col = self._grid_coords(state)
        return {"grid_position": (row, col), "state_index": state}
