import os

# Must be set BEFORE importing numeric libraries:
# allow duplicate Intel MKL runtime loads instead of aborting the process.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Cap OpenMP at a single thread to avoid multi-threading conflicts.
os.environ["OMP_NUM_THREADS"] = "1"

import matplotlib
# Use the non-interactive Agg backend (headless rendering to buffers/files).
matplotlib.use("Agg")
# SimHei font so the Chinese labels below render; keep minus signs displayable.
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False 
import matplotlib.pyplot as plt


from pathlib import Path
from pprint import pprint

import gymnasium as gym
import numpy as np
from stable_baselines3.common.monitor import Monitor
from gymnasium import spaces
from stable_baselines3 import PPO

''' ============== 路径配置 ============== '''
class Config:
    """Project path configuration, rooted at this source file's directory."""

    def __init__(self):
        ROOT = Path(__file__).parents[0].absolute()
        self.model_dir      = ROOT / r'files/models'              # saved models
        self.ppo_maze       = ROOT / r'files/models/ppo_maze.zip' # PPO weights file
        self.img_dir        = ROOT / r'files/imgs'                # rendered images / GIFs
        self.log_dir        = ROOT / r'files/logs'                # Monitor / TensorBoard logs

    def __str__(self) -> str:
        # Bug fix: the previous implementation print()-ed as a side effect
        # and returned a bare '\n'.  __str__ must build and RETURN the
        # representation; callers (e.g. print(conf)) then emit it once.
        lines = (f'{key}: {value}' for key, value in self.__dict__.items())
        return '\n' + '\n'.join(lines) + '\n'

conf = Config()



''' ============== 1. 创建迷宫环境 ============== '''
class MazeEnv(gym.Env):
    """A 10x10 grid maze.

    The agent starts at (0, 0) and must reach the goal at (9, 9) while
    avoiding traps and walls.  Observations are the agent's integer (x, y)
    cell; the 4 discrete actions move one cell up/right/down/left.
    """

    def __init__(self) -> None:
        super().__init__()
        # Half-open bounds: a cell is inside iff bounds[0] <= pos < bounds[1].
        self.bounds = np.array([[0, 0], [10, 10]])
        self.goal   = np.array([9, 9])
        self.start  = np.array([0, 0])
        self.traps  = np.array([[1, 1], [6, 6]])
        self.walls  = np.array([[1, 2], [6, 5]])
        self.total_score = 0
        # Robustness fix: also initialize position/history here, so the env
        # is in a valid state even if step() or rendering code that reads
        # .history runs before reset() (previously: AttributeError).
        self.pos = self.start.copy()
        self.history = [self.start.copy()]
        self.actions = {
            0: np.array([0, 1]),   # up
            1: np.array([1, 0]),   # right
            2: np.array([0, -1]),  # down
            3: np.array([-1, 0]),  # left
        }
        self.action_space = spaces.Discrete(4)  # 4 possible moves
        self.observation_space = spaces.Box(
            low = self.bounds[0],
            high = self.bounds[1] - 1,
            shape = (2,),
            dtype = np.int32
        )

    ''' —— —— 重置 —— —— '''
    def reset(self, seed=None, options=None) -> tuple[np.ndarray, dict]:
        """Reset the agent to the start cell; return (observation, info)."""
        super().reset(seed=seed)
        self.total_score = 0
        self.pos = self.start.copy()
        self.history = [self.start.copy()]  # clear the path; always contains the start
        return np.array(self.pos, dtype=np.int32), {}

    ''' —— —— 移动 —— —— '''
    def step(self, action) -> tuple[np.ndarray, float, bool, bool, dict]:
        """Apply one action and return (obs, reward, terminated, truncated, info).

        Reward scheme: illegal moves (wall / out of bounds) leave the position
        unchanged and cost -2; traps cost -10 (non-terminal); reaching the
        goal yields +100 and terminates; any other step costs -0.1.
        """
        prev_pos = self.pos.copy()
        next_pos = prev_pos + self.actions[action]

        if self.is_illegal(next_pos):
            # Penalize the illegal move; the agent stays where it was.
            reward, done = -2, False
        else:
            self.pos = next_pos.copy()
            self.history.append(next_pos.copy())  # track the path for rendering

            if np.array_equal(self.pos, self.goal):
                # Goal reached: big reward, episode terminates.
                reward, done = 100, True
            elif any(np.array_equal(self.pos, trap) for trap in self.traps):
                # Trap penalty; the episode continues.
                reward, done = -10, False
            else:
                # Small per-step cost encourages short paths.
                reward, done = -0.1, False

        self.total_score += reward

        return (
            np.array(self.pos, dtype=np.int32),
            reward,
            done,
            False,  # the env never truncates on its own
            {"total_score": self.total_score}
        )

    ''' —— —— 非法移动？ —— —— '''
    def is_illegal(self, next_pos) -> bool:
        """Return True iff next_pos hits a wall or leaves the grid."""
        # Wall collision?
        if any(np.array_equal(next_pos, wall) for wall in self.walls):
            return True
        # Out of bounds?
        return bool(
            np.any(next_pos < self.bounds[0]) or
            np.any(next_pos >= self.bounds[1])
        )


''' ============== 2. 初始化 =============='''
# Register the custom maze environment under the Gymnasium id "Maze-v0".
gym.register(id="Maze-v0",entry_point='__main__:MazeEnv')
model_path = conf.ppo_maze           # where the trained PPO weights are saved/loaded
log_dir = (conf.log_dir).as_posix()  # directory for Monitor / TensorBoard logs
env = gym.make("Maze-v0")
# Wrap with Monitor so episode rewards/lengths are logged for draw_reward().
env = Monitor(env, log_dir)


''' ============== 3. 训练 =============='''
def train()-> None:
    """Train a PPO agent on the maze environment and persist it to disk."""
    agent = PPO("MlpPolicy", env, verbose=1, tensorboard_log=log_dir)
    agent.learn(total_timesteps=50000, tb_log_name="maze_run")
    agent.save(model_path)  # weights are reused by test() / draw_gif()
    del agent


''' ============== 4. 测试 =============='''
def test()-> None:
    """Replay the saved policy greedily for at most 100 steps, logging progress."""
    model = PPO.load(model_path)
    obs, _ = env.reset()
    for step in range(100):
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, done, _, info = env.step(int(action))
        total_score = info['total_score']
        print(f"Step {step}: 位置 {obs}, 奖励 {reward}，当前总分：{total_score:.2f},")
        if not done:
            continue
        # Episode ended: a positive final reward means the goal was reached.
        outcome = "任务完成！" if float(reward) > 0 else "任务失败！"
        print(outcome)
        break
    del model
    print('结束')



''' ============== 5. 绘制路径 GIF 图 =============='''
def draw_gif()-> None:
    """Roll out the trained policy once and save the trajectory as a GIF.

    Fixes: the redundant second gym.register("Maze-v0", ...) is dropped
    (the id is already registered at module import; the code used the
    unwrapped env anyway, so instantiating MazeEnv directly is equivalent),
    and the output directory is created before saving.
    """
    import imageio

    model = PPO.load(conf.ppo_maze)

    # Use the raw environment directly: rendering reads .history/.bounds/
    # .goal/.traps/.walls, which wrappers would hide behind .unwrapped.
    env_raw = MazeEnv()
    obs, info = env_raw.reset()

    frames = []
    max_steps = 200

    for step in range(max_steps):
        action, _ = model.predict(obs, deterministic=True)
        obs, reward, done, _, info = env_raw.step(int(action))

        # Draw one frame of the current maze state.
        fig, ax = plt.subplots(figsize=(5, 5))
        ax.set_xlim(0, env_raw.bounds[1][0])
        ax.set_ylim(0, env_raw.bounds[1][1])
        ax.set_xticks(range(env_raw.bounds[1][0] + 1))
        ax.set_yticks(range(env_raw.bounds[1][1] + 1))
        ax.grid(True)

        # Goal, traps and walls.
        ax.scatter(*env_raw.goal, c='green', s=200, marker='*', label='目标')
        if env_raw.traps.size > 0:
            ax.scatter(env_raw.traps[:, 0], env_raw.traps[:, 1], c='red', s=100, label='陷阱')
        if env_raw.walls.size > 0:
            ax.scatter(env_raw.walls[:, 0], env_raw.walls[:, 1], c='black', s=100, marker='s', label='墙体')

        # Trajectory so far plus the current position.
        hist = np.array(env_raw.history)
        ax.plot(hist[:, 0], hist[:, 1], c='blue', linewidth=2, label='轨迹')
        ax.scatter(hist[-1, 0], hist[-1, 1], c='blue', s=60, edgecolors='white', label='当前位置')

        ax.set_title("强化学习迷宫演示", fontsize=14)
        ax.legend(loc='upper left')

        # Backend-agnostic frame capture (works with the Agg canvas).
        fig.canvas.draw()
        image = np.array(fig.canvas.buffer_rgba())[..., :3]
        frames.append(image)
        plt.close(fig)  # free figure memory on every step

        if done:
            break

    # Sanity check: at least one frame must have been captured.
    if not frames:
        raise RuntimeError("没有捕获到任何帧，请检查环境运行是否正常。")

    # Ensure the output directory exists, then write the GIF.
    conf.img_dir.mkdir(parents=True, exist_ok=True)
    out_path = conf.img_dir / 'maze_demo.gif'
    imageio.mimsave(out_path, frames, fps=4)
    print(f"✅ 已生成动图：{out_path}")


''' ============== 6. 绘制 reward 变化图 =============='''
def draw_reward()-> None:
    """Plot the per-episode reward curve from the Monitor logs.

    Fixes: the output directory is created before plt.savefig (which does
    not create missing directories), and the figure is closed afterwards
    so repeated calls do not leak Agg figures.
    """
    from stable_baselines3.common.results_plotter import load_results, ts2xy

    # Load the monitor CSVs written during training.
    log_dir = (conf.log_dir).as_posix()
    x, y = ts2xy(load_results(log_dir), 'timesteps')

    fig = plt.figure(figsize=(8,5))
    plt.plot(x, y)
    plt.xlabel('训练轮次')
    plt.ylabel('每轮次奖励得分')
    plt.title('PPO 训练奖励曲线')
    plt.grid(True)

    # Ensure the target directory exists before writing the image.
    conf.img_dir.mkdir(parents=True, exist_ok=True)
    plt.savefig(conf.img_dir /'reward_curve.png')
    plt.close(fig)  # release the figure (Agg backend; nothing is shown)


if __name__ == '__main__':
    # Full pipeline: train the agent, evaluate it, then render diagnostics.
    for stage in (train, test, draw_gif, draw_reward):
        stage()

