import numpy as np
from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from stable_baselines3.common.vec_env import SubprocVecEnv
from strategy.rl.envs.env2 import RLPredEnv2
import torch
import torch.nn as nn
from trade.model_base import FitBase


class LstmFeatureExtractor(BaseFeaturesExtractor):
    """Feature extractor combining an LSTM over the sequential part of a flat
    observation with the remaining scalar features.

    The flattened observation is assumed to be laid out as
    ``[history * seqfeature sequential values, other_size extra values]``
    (assumption based on how ``forward`` slices ``X`` — confirm against the
    environment's observation builder).

    Args:
        observation_space: Gym observation space (forwarded to the base class).
        hidden_size: LSTM hidden width; also the width of the sequential
            output fed to the policy.
        history: Number of timesteps in the sequential part of the observation.
        seqfeature: Number of features per timestep; 0 disables the LSTM.
        other_size: Number of trailing non-sequential features.
        num_layers: Number of stacked LSTM layers.
    """

    def __init__(
        self,
        observation_space,
        hidden_size: int = 8,
        history: int = 40,
        seqfeature: int = 9,
        other_size: int = 3,
        num_layers: int = 2,
    ):
        # Attributes must exist before super().__init__ because the base
        # class is initialized with self.features_dim (a property below).
        self.history = history
        self.seqfeature = seqfeature
        self.hidden_size = hidden_size
        self.other_size = other_size
        self.num_layers = num_layers
        super().__init__(observation_space, self.features_dim)
        if self.seqfeature > 0:
            self.seqmodel = nn.LSTM(
                self.seqfeature, self.hidden_size, self.num_layers, batch_first=True
            )

    @property
    def features_dim(self) -> int:
        """Width of the feature vector handed to the policy/value networks."""
        return self.hidden_size + self.other_size

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        """Encode a batch of flat observations.

        Args:
            X: Tensor of shape ``(batch, history * seqfeature + other_size)``.

        Returns:
            Tensor of shape ``(batch, hidden_size + other_size)`` — the LSTM's
            last-step output concatenated with the trailing features. When the
            extractor has no sequential part (``seqfeature == 0``) the input is
            passed through unchanged.
        """
        if self.seqfeature == 0:
            # No LSTM configured: the observation is used as-is.
            # (Checked first — in the original code this case fell into the
            # concatenation branch and raised NameError on `seqout`.)
            return X

        batch = X.shape[0]
        seqflatten_size = self.seqfeature * self.history

        # Initial hidden and cell states (zeros, matching X's device/dtype).
        h_0 = torch.zeros(
            self.num_layers, batch, self.hidden_size, device=X.device, dtype=X.dtype
        )
        c_0 = torch.zeros(
            self.num_layers, batch, self.hidden_size, device=X.device, dtype=X.dtype
        )
        # Reshape the flat prefix into (batch, time, features) for the LSTM.
        seqdata = X[:, :seqflatten_size].view(
            [batch, self.history, self.seqfeature]
        )
        output, (h_n, c_n) = self.seqmodel(seqdata, (h_0, c_0))
        # Keep only the output at the final timestep.
        seqout = output[:, -1, :]

        if seqflatten_size < X.shape[1]:
            # Append the non-sequential tail features.
            addfeature = X[:, seqflatten_size:]
            return torch.cat([seqout, addfeature], dim=1)
        return seqout


class RLFitModel(FitBase):
    """Thin wrapper that trains a stable-baselines3 agent on ``RLPredEnv2``
    and replays a trained agent to produce per-step predictions.

    Args:
        rl_model: Algorithm name; one of ``PPO``, ``SAC``, ``A2C``, ``DQN``,
            ``TD3``, ``DDPG``.
        policy: stable-baselines3 policy identifier (e.g. ``"MlpPolicy"``).
    """

    def __init__(
        self,
        rl_model: str = "PPO",
        policy: str = "MlpPolicy",
    ):
        self.model = None
        model_map = dict(PPO=PPO, SAC=SAC, A2C=A2C, DQN=DQN, TD3=TD3, DDPG=DDPG)
        self.model_type = model_map[rl_model]
        self.policy = policy
        self.policy_kwargs = dict(
            # Two-layer policy (pi) and value (vf) networks, 64 then 16 units.
            net_arch=dict(pi=[64, 16], vf=[64, 16]),
            features_extractor_class=LstmFeatureExtractor,
            features_extractor_kwargs=dict(),
        )

    def load_model(self, file: str):
        """Load a previously saved agent from ``file``."""
        self.model = self.model_type.load(file)

    def save_model(self, file: str):
        """Persist the trained agent to ``file``."""
        self.model.save(file)

    def fit(self, X, y, verbose: int = 1, total_timesteps=50000):
        """Train a fresh agent on four parallel copies of the environment.

        Args:
            X: Feature data passed to ``RLPredEnv2``.
            y: Target data passed to ``RLPredEnv2``.
            verbose: stable-baselines3 verbosity level.
            total_timesteps: Total environment steps to train for.
        """
        env = SubprocVecEnv([lambda: RLPredEnv2(X, y) for _ in range(4)])
        model_kwargs = dict(
            policy_kwargs=self.policy_kwargs,
            verbose=verbose,
            device="cpu",
        )
        # ``n_steps`` is an on-policy-only constructor argument; passing it
        # to DQN/SAC/TD3/DDPG raises TypeError.
        if self.model_type in (PPO, A2C):
            model_kwargs["n_steps"] = 256
        model = self.model_type(self.policy, env, **model_kwargs)
        env.reset()
        model.learn(total_timesteps=total_timesteps)
        self.model = model

    def predict(self, X):
        """Run one deterministic episode over ``X`` and collect actions.

        Args:
            X: Feature data; dummy zero targets are supplied to the env.

        Returns:
            ``np.ndarray`` of shifted actions (``action - 1`` per step —
            presumably mapping discrete actions to {-1, 0, 1}; confirm
            against the environment's action space).
        """
        env = RLPredEnv2(X, np.zeros(X.shape[0]))
        model = self.model
        obs, _ = env.reset()
        done = False
        total_reward = 0  # running total of reward (or "gain" when reported)
        results = []
        # Roll out a single episode.
        while not done:
            # Deterministic action from the trained policy.
            action, _states = model.predict(obs, deterministic=True)
            obs, reward, terminated, truncated, info = env.step(action)
            # Treat truncation as episode end too, otherwise a
            # truncation-only episode would never terminate this loop.
            done = terminated or truncated
            # Prefer the environment-reported gain when available.
            if "gain" in info:
                total_reward += info["gain"]
            else:
                total_reward += reward
            results.append(action - 1)

        print(f"Total gain in one episode: {total_reward}")
        return np.array(results)
