from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize, SubprocVecEnv
from stable_baselines3.common.callbacks import BaseCallback, EvalCallback
from notepad_env import NotepadEnv
import os
import torch

# ----------------------------------------
# 1. 自定义回调：监控训练
# ----------------------------------------
class TrainingCallback(BaseCallback):
    """Lightweight progress logger: prints a line every 500 environment timesteps.

    Uses ``BaseCallback.num_timesteps`` — maintained by SB3 and incremented by
    ``n_envs`` per ``_on_step`` call — instead of a hand-rolled counter, so the
    reported count stays correct even with vectorized (multi-env) training.
    """

    def __init__(self, verbose=0):
        super().__init__(verbose)
        # Kept for backward compatibility with the original attribute name;
        # mirrors self.num_timesteps after every step.
        self.total_timesteps = 0

    def _on_step(self) -> bool:
        self.total_timesteps = self.num_timesteps
        if self.num_timesteps % 500 == 0:
            print(f"✅ 已训练 {self.num_timesteps} 个 timesteps")
        # Returning True lets training continue; False would abort it.
        return True


# ----------------------------------------
# 2. 创建环境（建议使用 VecNormalize）
# ----------------------------------------
def make_env():
    """Factory producing a fresh NotepadEnv instance for the vec-env wrappers."""
    environment = NotepadEnv()
    return environment

def _build_normalized_env():
    """Create one NotepadEnv wrapped for vectorization + reward normalization.

    Observation normalization is off (``norm_obs=False``); rewards are
    normalized and clipped to +/-50 for PPO stability.
    """
    venv = DummyVecEnv([make_env])
    return VecNormalize(venv, norm_obs=False, norm_reward=True, clip_reward=50.0)


def _build_model(env):
    """Construct the PPO learner with small-MLP hyperparameters for debugging."""
    return PPO(
        "MlpPolicy",
        env,
        verbose=1,
        tensorboard_log="./ppo_notepad_tensorboard/",
        learning_rate=3e-5,
        n_steps=20,          # very short rollouts — intended for quick debugging runs
        batch_size=20,       # must evenly divide n_steps * n_envs (20 * 1)
        n_epochs=10,
        gamma=0.99,
        ent_coef=0.01,
        clip_range=0.2,
        device="auto",
        policy_kwargs=dict(
            # NOTE(review): SB3 >= 1.8 prefers net_arch=dict(pi=..., vf=...)
            # (no surrounding list); the list form below is the legacy syntax —
            # confirm against the installed SB3 version.
            net_arch=[dict(pi=[64, 64], vf=[64, 64])],
        ),
    )


def _run_episode(model, env, max_steps=30, verbose=True):
    """Roll out one episode (truncated at ``max_steps``); return total reward.

    Vec envs return per-env arrays from ``step``; with a single env we extract
    element 0 so arithmetic and ``:.2f`` formatting work on plain scalars
    (formatting a 1-element ndarray is deprecated/rejected by NumPy).
    """
    obs = env.reset()
    total_reward = 0.0
    step_count = 0
    done = False
    while not done and step_count < max_steps:
        action, _states = model.predict(obs, deterministic=True)
        obs, reward, done, info = env.step(action)
        reward = float(reward[0])
        done = bool(done[0])
        total_reward += reward
        step_count += 1
        if verbose:
            print(f"步骤 {step_count}: 动作={action}, 奖励={reward:.2f}, 累计奖励={total_reward:.2f}")
    return total_reward


def _evaluate(train_env):
    """Evaluate the best saved model; fall back to the final model on failure."""
    print("\n" + "="*50)
    print("📊 开始评估【最佳模型】...")
    print("="*50)

    try:
        # Bug fix: load the saved normalization stats onto a FRESH raw vec env.
        # (The original wrapped an already-VecNormalize env in a second
        # VecNormalize, double-normalizing observations/rewards.)
        raw_env = DummyVecEnv([make_env])
        eval_env = VecNormalize.load("env_norm.pkl", raw_env)
        eval_env.training = False     # freeze running statistics during eval
        eval_env.norm_reward = False  # report raw (un-normalized) rewards

        # Load the BEST checkpoint saved by EvalCallback, not the last model.
        model = PPO.load("./logs/best_model", env=eval_env)
        total_reward = _run_episode(model, eval_env, verbose=True)
        print(f"\n🎉 评估完成，总奖励: {total_reward:.2f}")
    except Exception as e:
        print(f"❌ 加载最佳模型失败: {e}")
        print("尝试评估最后模型...")
        # Fallback: evaluate the final model on the training env, with the
        # same eval settings (frozen stats, raw rewards) as the primary path.
        train_env.training = False
        train_env.norm_reward = False
        model = PPO.load("ppo_notepad_final")
        total_reward = _run_episode(model, train_env, verbose=False)
        print(f"评估完成，总奖励: {total_reward:.2f}")


def _inspect_generated_files():
    """Print the contents of the most recent files the env is expected to write.

    Assumes the environment saves files named ``rl_text_*.txt`` — TODO confirm
    against NotepadEnv.
    """
    print("\n🔍 检查生成的文件...")
    generated_files = [f for f in os.listdir(".") if f.startswith("rl_text_") and f.endswith(".txt")]
    if generated_files:
        for file_name in generated_files[-3:]:  # show the 3 most recent
            try:
                with open(file_name, 'r', encoding='utf-8') as f:
                    content = f.read()
                print(f"📄 文件 {file_name} 内容: {repr(content)}")
            except Exception as e:
                print(f"❌ 读取文件 {file_name} 时出错: {e}")
    else:
        print("❌ 未生成文件（可能任务未完成）")


def main():
    """Train PPO on NotepadEnv, auto-save the best model, then evaluate it."""
    env = _build_normalized_env()

    # For faster training with parallel workers, swap in:
    # env = SubprocVecEnv([make_env for _ in range(4)])
    # env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_reward=50.0)

    model = _build_model(env)
    print("开始训练... 按 Ctrl+C 可中断")

    # Separate eval env so EvalCallback can auto-save the best model to ./logs/.
    # NOTE(review): its VecNormalize statistics are NOT synced with the
    # training env's — evaluation scores may drift from training rewards.
    eval_env = _build_normalized_env()
    eval_callback = EvalCallback(
        eval_env,
        best_model_save_path="./logs/",
        log_path="./logs/",
        eval_freq=100,               # evaluate every 100 steps
        deterministic=True,          # greedy policy during evaluation
        render=False,
        n_eval_episodes=5,
        verbose=1,
    )

    try:
        model.learn(
            total_timesteps=1000,
            callback=[TrainingCallback(), eval_callback],
            reset_num_timesteps=False,  # keep the step counter when resuming
        )
        model.save("ppo_notepad_final")  # final model (not necessarily best)
        env.save("env_norm.pkl")         # persist normalization statistics
        print("✅ 训练完成！最终模型已保存")
    except KeyboardInterrupt:
        print("⚠️ 训练中断")

    _evaluate(env)
    _inspect_generated_files()

# Entry-point guard: run the full train/evaluate pipeline only when this file
# is executed as a script, not when it is imported as a module.
if __name__ == '__main__':
    main()