import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.env_checker import check_env
from dl_rl.rl_env import TradingEnv
from dl_rl.dl_model import TransformerFeatureExtractor
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.monitor import Monitor
import os
from stable_baselines3.common.callbacks import BaseCallback
import pandas as pd
import csv

class TrainingLoggerCallback(BaseCallback):
    """Record key PPO training metrics to a CSV file.

    The header row is written once at construction time; one data row is
    appended at the end of every rollout with the latest values from the
    model's logger.
    """

    # Metric names written per row, looked up under the "train/",
    # "rollout/", then "time/" logger namespaces (in that order).
    # Single source of truth for both the header and the data rows.
    _METRICS = (
        "approx_kl",
        "clip_fraction",
        "clip_range",
        "entropy_loss",
        "loss",
        "n_updates",
        "policy_gradient_loss",
        "value_loss",
        "ep_len_mean",
        "ep_rew_mean",
    )

    def __init__(self, log_path="logs/train/training_metrics.csv", verbose=1):
        """
        :param log_path: destination CSV file; parent directories are created.
        :param verbose: if > 0, print a line each time metrics are saved.
        """
        super().__init__(verbose)
        self.log_path = log_path

        os.makedirs(os.path.dirname(log_path), exist_ok=True)

        # (Re)create the CSV file and write the header row.
        with open(self.log_path, "w", newline="") as f:
            csv.writer(f).writerow(("timesteps",) + self._METRICS)

    def _on_step(self) -> bool:
        # Must return True, otherwise SB3 aborts training.
        return True

    def _on_rollout_end(self) -> None:
        """Append one row of metrics after each rollout ends."""
        d = self.model.logger.name_to_value  # current logged values

        def get(k):
            # Search namespaces in priority order with explicit None checks.
            # Unlike an `or` chain, a legitimate 0 / 0.0 value is NOT
            # treated as missing.
            for ns in ("train", "rollout", "time"):
                v = d.get(f"{ns}/{k}")
                if v is not None:
                    return v
            return None

        with open(self.log_path, "a", newline="") as f:
            csv.writer(f).writerow(
                [self.num_timesteps] + [get(k) for k in self._METRICS]
            )

        if self.verbose > 0:
            print(f"[Logger] Saved metrics at {self.num_timesteps} timesteps.")

# NOTE(review): this path overrides the class default
# ("logs/train/training_metrics.csv") — confirm which location downstream
# tooling reads from.
callback = TrainingLoggerCallback("logs/training_metrics.csv")

log_dir = "./logs"
os.makedirs(log_dir, exist_ok=True)


# Create the environment instance.
# Assumes the order-book CSV for symbol 600000 exists at this relative
# path — TODO confirm the working directory when launching this script.
env = TradingEnv(csv_path="./data/20240731/sh/600000/order.csv", symbol="600000")
# Monitor wraps the env to record episode stats under log_dir.
env = Monitor(env, log_dir)

# Optional: verify the environment conforms to the Gymnasium API
# (observation/action spaces, reset/step signatures).
check_env(env, warn=True)

class CustomPPOPolicy(ActorCriticPolicy):
    """Actor-critic policy that uses TransformerFeatureExtractor as the
    PPO feature extractor.

    The extractor hyperparameters (d_model=128, nhead=4, num_layers=2,
    dim_feedforward=256) are defaults; callers may still override them
    via ``policy_kwargs``.
    """

    def __init__(self, *args, **kwargs):
        # Use setdefault instead of passing these explicitly alongside
        # **kwargs: if the caller (e.g. PPO.load round-tripping saved
        # policy kwargs) also supplies them, an explicit pass-through
        # would raise "got multiple values for keyword argument".
        kwargs.setdefault("features_extractor_class", TransformerFeatureExtractor)
        kwargs.setdefault(
            "features_extractor_kwargs",
            dict(d_model=128, nhead=4, num_layers=2, dim_feedforward=256),
        )
        super().__init__(*args, **kwargs)

# 初始化 PPO 模型
model = PPO(CustomPPOPolicy, env, verbose=1)

# 开始训练
# 控制所有训练一共交互的步数
model.learn(total_timesteps=10000, callback=callback)

# 保存模型
model.save("ppo_trading_model")