import gym
import numpy as np
import torch
from stable_baselines3 import SAC
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from configs.config_loader2 import Config2
from envs.gym_env import NewFuturesEnvLBC
from model.transform_with_time_dist import (
    CombinedModel,
)
from presentation import show
from presentation.show import calc_portfolio_metrics
from typing import Callable


class CombinedModelExtractor(BaseFeaturesExtractor):
    """
    Bridge between the pretrained deep-learning model and the SB3 framework.

    Loads a pretrained ``CombinedModel``, freezes its weights, and exposes it
    as a fixed feature extractor for the SAC policy.
    """

    def __init__(self, observation_space: gym.spaces.Box, features_dim: int):
        """
        :param observation_space: Box observation space; shape[-1] is taken as
            the per-step channel count — assumes layout matches
            NewFuturesEnvLBC, TODO confirm.
        :param features_dim: dimensionality of the flattened feature vector
            reported to SB3.
        """
        super().__init__(observation_space, features_dim)
        obs_shape = observation_space.shape
        channel = obs_shape[-1]
        HIDDEN_DIM = 768

        # 1. Build the model exactly once.
        # BUGFIX: the original constructed CombinedModel twice back to back and
        # discarded the first instance, wasting time and (GPU) memory.
        # NOTE(review): `device` is a module-level global defined later in this
        # file; it must exist before this extractor is instantiated.
        self.combined_model = CombinedModel(channel, HIDDEN_DIM, 1).to(device)

        # 2. Load pretrained weights.
        pretrained_path = "checkpoints/combined_model_normalized.pth"
        state_dict = torch.load(pretrained_path, map_location=device)
        self.combined_model.load_state_dict(state_dict, strict=True)
        print(f"[✅] Loaded pretrained weights from {pretrained_path}")

        # 3. Freeze parameters so SB3's optimizer never fine-tunes the extractor.
        for param in self.combined_model.parameters():
            param.requires_grad = False
        print("[🔒] Feature extractor parameters frozen.")

        # 4. Switch to eval mode (disables dropout / batch-norm updates).
        self.combined_model.eval()

    def forward(self, observations: torch.Tensor) -> torch.Tensor:
        """Run the frozen extractor; no gradients are needed."""
        with torch.no_grad():
            features = self.combined_model(observations.float(), mode="rl")
            # Flatten everything except the batch dimension.
            return features.view(features.shape[0], -1)


def linear_schedule(initial_value: float, end_value: float) -> Callable[[float], float]:
    """
    Build a linearly interpolating learning-rate schedule.

    :param initial_value: learning rate at the start of training
    :param end_value: learning rate at the end of training
    :return: a function mapping remaining progress (1 -> 0) to the current rate
    """
    span = initial_value - end_value

    def schedule(progress_remaining: float) -> float:
        """
        Compute the current learning rate from remaining progress.

        :param progress_remaining: 1 at the start of training, 0 at the end
        :return: the interpolated learning rate
        """
        return end_value + span * progress_remaining

    return schedule


# How often (in episodes) SB3 logs rolling stats to TensorBoard.
LOG_INTERVAL_EPISODES = 1
TENSORBOARD_LOG_PATH = "./tensorboard_logs/sac_futures/"
# Toggle: run the training phase before evaluation.
TRAIN = True
# ---------------------------
# 1. Build the environments
# ---------------------------
cfg = Config2(yaml_path="configs/config_A.yaml")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
config_env = cfg.env

# Chronological 80/20 train/validation split of the market data.
# NOTE(review): assumes config_env["df"] is a pandas DataFrame — confirm in Config2.
df = config_env["df"]
total_length = df.shape[0]
train_ratio = 0.8
train_size = int(total_length * train_ratio)

train_df = df[:train_size]
val_df = df[train_size:]

# Shallow copies of the env config, swapping in the split data and train flag.
train_config = config_env.copy()
train_config["df"] = train_df
train_config["if_train"] = True

val_config = config_env.copy()
val_config["df"] = val_df
val_config["if_train"] = False

env1 = NewFuturesEnvLBC(train_config)  # training environment
# env1 = Monitor(env1)

env2 = NewFuturesEnvLBC(val_config)  # validation / evaluation environment
# env2 = Monitor(env2)


# Observation layout: shape[0] presumably the number of futures, shape[-1]
# the channel count — TODO confirm against NewFuturesEnvLBC.
obs_shape = env1.observation_space.shape
channel = obs_shape[-1]
future_num = obs_shape[0]
lr_scheduler = linear_schedule(3e-4, 1e-5)

# ---------------------------
# 2. Define and train the SAC model
# ---------------------------
# Periodically evaluate on the validation env and keep the best checkpoint.
eval_callback = EvalCallback(
    env2,
    best_model_save_path="./best_model/",
    log_path="./logs/",
    eval_freq=10000,
)
# Plug the frozen pretrained extractor into the policy network.
policy_kwargs = dict(
    features_extractor_class=CombinedModelExtractor,
    features_extractor_kwargs=dict(features_dim=channel * future_num),
)

MODEL_SAVE_PATH = "sac_futures_model.zip"

# Create a SAC model (off-policy, hence the replay-buffer related parameters).
model = SAC(
    "MlpPolicy",
    env1,
    policy_kwargs=policy_kwargs,
    verbose=4,
    device=device,
    learning_rate=lr_scheduler,  # linearly decayed from 3e-4 to 1e-5
    buffer_size=20000,  # replay-buffer capacity (transitions)
    learning_starts=512,  # env steps collected before gradient updates begin
    batch_size=256,
    train_freq=(8, "step"),  # one training round every 8 env steps
    gamma=0.95,
    tau=0.005,  # soft target-network update coefficient
    tensorboard_log=TENSORBOARD_LOG_PATH,
)
# Sanity-check the observation dimensions.
print("-" * 50)
print(f"原始观测空间形状: {env1.observation_space.shape}")
print("-" * 50)

if TRAIN:
    print("开始训练...")
    # Total training timesteps; increase for a real run.

    model.learn(
        total_timesteps=150000,
        callback=eval_callback,
        log_interval=LOG_INTERVAL_EPISODES,  # log every episode
    )
    print("训练完成！")

    # ---------------------------
    # 3. Save the trained model
    # ---------------------------
    print(f"\n正在保存模型到: {MODEL_SAVE_PATH}")
    model.save(MODEL_SAVE_PATH)
    print("模型保存成功！")

    # Drop the in-memory model (optional) so evaluation uses the on-disk copy.
    del model


# ---------------------------
# 4. Load the model and evaluate it
# ---------------------------
print("\n" + "=" * 50)
print("开始加载模型并进行评估...")
print("=" * 50)

# Load the saved model. Passing the env instance (env2) lets SB3 restore
# the correct action/observation space configuration.
print(f"从 {MODEL_SAVE_PATH} 加载模型...")
loaded_model = SAC.load(MODEL_SAVE_PATH, env=env2)  # SAC.load, matching SAC above
print("模型加载成功！")


# Reset the validation environment and roll out one full episode with the
# trained policy, accumulating rewards for a simple evaluation summary.
obs, _ = env2.reset()
terminated = False
truncated = False  # BUGFIX: track truncation (Gymnasium 5-tuple step API)
total_reward = 0
step_count = 0
rewards_list = []

# BUGFIX: the original loop only checked `done` (the terminated flag). If the
# environment ended the episode via truncation (e.g. a time limit), the loop
# would spin forever. Stop on either termination OR truncation.
while not (terminated or truncated):
    # Use the loaded model to pick an action (deterministic for evaluation).
    action, _states = loaded_model.predict(obs, deterministic=True)

    # Step the environment.
    obs, reward, terminated, truncated, info = env2.step(action)

    # Log this step.
    action_str = np.array2string(action, formatter={"float_kind": lambda x: "%.2f" % x})
    print(f"Step: {step_count + 1} | Action: {action_str} | Reward: {reward:.4f}")

    # Accumulate rewards.
    total_reward += reward
    rewards_list.append(reward)
    step_count += 1

    if "account_value" in info:
        print(f"  -> Account Value: {info['account_value']:.2f}")

print("\n评估完成！")
print("-" * 50)
print(f"总步数: {step_count}")
print(f"总奖励 (Total Reward): {total_reward:.4f}")
if step_count > 0:
    print(f"平均每步奖励 (Average Reward): {np.mean(rewards_list):.4f}")

# Dump per-step account values if the env exposes them (project-specific API).
if hasattr(env2, "save_asset_memory"):
    print("\n正在获取回测账户价值详情...")
    df_account_value = env2.save_asset_memory()
    print("账户价值变化 DataFrame:")
    print(df_account_value.head())
    print(calc_portfolio_metrics(df_account_value["account_value"]))
actions, price = env2.get_plot_data()
show.plot_price_and_actions(prices=price, actions=actions)