import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import torch.nn as nn
from agents import rl_utils

from envs.future_env import MyFuturesEnv
from model.patchtst import PatchTST
from configs.config_loader import Config



class PolicyNet(nn.Module):
    """Deterministic policy head.

    Maps an encoded state vector plus the scalar account amount to an
    action vector whose components each lie in (-1, 1).
    """

    def __init__(self, state_dim, hidden_dim, action_dim):
        super().__init__()
        # Normalize the encoded state before mixing in the raw scalar amount.
        self.norm = nn.LayerNorm(state_dim)
        # +1 input feature for the amount scalar concatenated in forward().
        self.fc1 = nn.Linear(state_dim + 1, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, action_dim)

    def forward(self, x, amount):
        normed = self.norm(x)
        combined = torch.cat((amount, normed), dim=-1)
        # 2*sigmoid(z) - 1 squashes each layer output into (-1, 1).
        hidden = 2.0 * torch.sigmoid(self.fc1(combined)) - 1.0
        return 2.0 * torch.sigmoid(self.fc2(hidden)) - 1.0


class ValueNet(torch.nn.Module):
    """Q-network: scores a (state, action, amount) triple with one scalar."""

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(ValueNet, self).__init__()
        # Input: normalized state + normalized action + scalar amount.
        self.fc1 = torch.nn.Linear(state_dim + action_dim + 1, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
        # Output: the Q value.
        self.fc_out = torch.nn.Linear(hidden_dim, 1)
        self.state_norm = nn.LayerNorm(state_dim)
        self.action_norm = nn.LayerNorm(action_dim)

    def forward(self, state, action, amounts):
        # Normalize each component, then concatenate along the feature axis.
        features = torch.cat(
            [self.state_norm(state), self.action_norm(action), amounts], dim=1
        )
        features = F.relu(self.fc1(features))
        features = F.relu(self.fc2(features))
        return self.fc_out(features)


class ActorCritic:
    """DDPG-style actor-critic agent for a futures-trading environment.

    A shared feature extractor (e.g. PatchTST) encodes the raw market window
    into a flat state vector; the actor maps that encoding plus the scalar
    account amount to a continuous action, and the critic scores
    (state, action, amount) triples with a Q value.
    """

    def __init__(
            self,
            state_dim,
            hidden_dim,
            action_dim,
            actor_lr,
            critic_lr,
            gamma,
            device,
            env,
            FeatureExtractor,
            FeatureExtractor_lr,
            FeatureExtractor_freeze_epochs=0,
    ):
        """Build the actor/critic networks and their optimizers.

        :param FeatureExtractor_freeze_epochs: number of initial epochs during
            which the feature extractor and actor are not stepped. Defaults to
            0 (train everything from the start) so callers that omit the
            argument — as the ``__main__`` script in this file does — still
            construct successfully instead of raising TypeError.
        """
        # Policy (actor) network.
        self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        # Value (critic) network.
        self.critic = ValueNet(state_dim, hidden_dim, action_dim).to(device)
        # Actor optimizer.
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        # Critic optimizer.
        self.critic_optimizer = torch.optim.Adam(
            self.critic.parameters(), lr=critic_lr
        )
        self.gamma = gamma
        self.device = device
        self.env = env
        self.FeatureExtractor = FeatureExtractor
        self.FeatureExtractor_optimizer = torch.optim.Adam(
            self.FeatureExtractor.parameters(), lr=FeatureExtractor_lr
        )
        self.FeatureExtractor_freeze_epochs = FeatureExtractor_freeze_epochs
        self.model_dir = "./checkpoints"
        os.makedirs(self.model_dir, exist_ok=True)

        self.name = "ActorCritic_Agent"

    def take_action(self, state):
        """Return the actor's action for a raw environment state.

        Runs under ``torch.no_grad()``: rollout actions are data, and must
        not drag the rollout-time computation graph into the later update
        (previously they did, letting ``update()`` backprop into stale
        per-step graphs through the stored actions).
        """
        with torch.no_grad():
            state_blc = self.process_state_to_blc(
                state,
                self.env.get_window(),
                self.env.get_future_count(),
                self.env.get_tech_num(),
            ).to(self.device)
            amount = torch.tensor(state[0], dtype=torch.float32).to(self.device)
            amount = amount.unsqueeze(0)

            encoded = self.FeatureExtractor(state_blc)
            encoded = encoded.reshape(-1)
            action = self.actor(encoded, amount)
        return action

    def q_value(self, state, action):
        """Return the scalar Q value for a raw state and an action.

        :param state: raw environment state (amount, positions, prices, tech)
        :param action: action tensor or ndarray, shape [future_num] or
            [1, future_num]
        """
        with torch.no_grad():  # evaluation only — no gradients needed
            state_blc = self.process_state_to_blc(
                state,
                self.env.get_window(),
                self.env.get_future_count(),
                self.env.get_tech_num(),
            ).to(self.device)  # shape: [b, l, c]

            amount = torch.tensor(state[0], dtype=torch.float32).unsqueeze(0).to(self.device)  # shape: [1]

            state_encoded = self.FeatureExtractor(state_blc)
            state_encoded = state_encoded.reshape(1, -1)  # ensure 2D

            if isinstance(action, np.ndarray):
                action = torch.tensor(action, dtype=torch.float32)
            action = action.to(self.device).reshape(1, -1)

            q = self.critic(state_encoded, action, amount.unsqueeze(1))  # [1, 1]
        return q.item()

    def update(self, transition_dict, epoch=0):
        """Run one actor-critic update over an episode of transitions.

        :param transition_dict: dict of lists — "states", "actions",
            "next_states", "rewards", "dones"
        :param epoch: current training epoch; the feature extractor and
            actor are frozen until ``FeatureExtractor_freeze_epochs``.
            Defaults to 0 so callers that do not track epochs (such as the
            ``__main__`` loop in this file) do not raise TypeError.
        """
        # Environment geometry.
        window = self.env.get_window()
        future_count = self.env.get_future_count()
        tech_num = self.env.get_tech_num()

        # ----------------------
        # Encode current states
        # ----------------------
        state_blc = torch.stack(
            [
                self.process_state_to_blc(s, window, future_count, tech_num)
                for s in transition_dict["states"]
            ]
        ).to(
            self.device
        )  # [B, b, l, c]

        B, b, l, c = state_blc.shape
        state_flat = state_blc.view(B * b, l, c)  # merge batch dims -> [B*b, l, c]
        state_enc = self.FeatureExtractor(state_flat)
        states = state_enc.reshape(B, -1)  # flatten per sample -> [B, b*d]

        amounts = (
            torch.tensor([s[0] for s in transition_dict["states"]], dtype=torch.float32)
            .unsqueeze(1)
            .to(self.device)
        )  # shape: [B, 1]

        # ----------------------
        # Actions and rewards
        # ----------------------
        # Detach: stored rollout actions are training data for the critic,
        # not a gradient path back into the rollout-time actor graphs.
        actions = torch.stack(transition_dict["actions"]).to(self.device).detach()
        actions = actions.reshape(actions.size(0), -1)

        rewards = (
            torch.tensor(transition_dict["rewards"], dtype=torch.float)
            .view(-1, 1)
            .to(self.device)
        )
        dones = (
            torch.tensor(transition_dict["dones"], dtype=torch.float)
            .view(-1, 1)
            .to(self.device)
        )

        # ----------------------
        # Encode next states
        # ----------------------
        next_state_blc = torch.stack(
            [
                self.process_state_to_blc(s, window, future_count, tech_num)
                for s in transition_dict["next_states"]
            ]
        ).to(
            self.device
        )  # [B, b, l, c]

        next_flat = next_state_blc.view(B * b, l, c)  # [B*b, l, c]
        next_enc = self.FeatureExtractor(next_flat)
        next_states = next_enc.reshape(B, -1)  # [B, b*d]

        next_amounts = (
            torch.tensor(
                [s[0] for s in transition_dict["next_states"]], dtype=torch.float32
            )
            .unsqueeze(1)
            .to(self.device)
        )  # shape: [B, 1]

        # ----------------------
        # TD target (no gradient through the target networks)
        # ----------------------
        with torch.no_grad():
            next_actions = self.actor(next_states, next_amounts)
            target_q = rewards + self.gamma * self.critic(
                next_states, next_actions, next_amounts
            ) * (1 - dones)

        # ----------------------
        # Current Q value and losses
        # ----------------------
        current_q = self.critic(states, actions, amounts)
        critic_loss = F.mse_loss(current_q, target_q)

        actor_actions = self.actor(states, amounts)
        actor_loss = -torch.mean(self.critic(states, actor_actions, amounts))

        # ----------------------
        # Optimizer steps
        # ----------------------
        self.actor_optimizer.zero_grad()
        self.critic_optimizer.zero_grad()
        self.FeatureExtractor_optimizer.zero_grad()

        # retain_graph: critic_loss shares the FeatureExtractor forward graph
        # with actor_loss, which is backpropagated first.
        actor_loss.backward(retain_graph=True)
        # The actor loss also deposited gradients in the critic's parameters;
        # clear them so the critic step applies only critic_loss gradients.
        self.critic_optimizer.zero_grad()
        critic_loss.backward()

        self.critic_optimizer.step()
        # Keep the feature extractor (and actor) frozen for the warm-up epochs.
        if epoch >= self.FeatureExtractor_freeze_epochs:
            self.FeatureExtractor_optimizer.step()
            self.actor_optimizer.step()

    def process_state_to_blc(self, state, window, future_count, tech_num):
        """Reshape a flat state vector into a normalized [b, l, c] tensor.

        Layout of ``state`` (1D array): [amount, positions(future_count),
        prices(window*future_count), tech(window*future_count*tech_num)].
        Output shape: [future_count, window, 1 + tech_num] — price is
        channel 0, followed by the technical indicators.
        """
        offset = 1 + future_count  # skip amount + per-future positions
        price_flat = state[offset: offset + window * future_count]
        tech_flat = state[offset + window * future_count:]
        price = price_flat.reshape(window, future_count).T
        tech = tech_flat.reshape(window, future_count, tech_num).transpose(1, 0, 2)
        price = price[:, :, np.newaxis]
        out = np.concatenate([price, tech], axis=-1)
        out = torch.tensor(out, dtype=torch.float32)
        return self.norm_blc(out)

    def norm_blc(self, x):
        """Normalize each channel to zero mean / unit variance over dim 1
        (the time/window axis); 1e-5 guards against zero variance."""
        means = x.mean(1, keepdim=True).detach()
        x = x - means
        stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x /= stdev
        return x

    def save_model(self):
        """Persist the extractor, actor and critic weights to checkpoints."""
        torch.save(
            self.FeatureExtractor.state_dict(),
            self._get_model_path(self.FeatureExtractor.name),
        )
        torch.save(self.actor.state_dict(), self._get_model_path("actor"))
        torch.save(self.critic.state_dict(), self._get_model_path("critic"))

    def load_model(self):
        """Restore the extractor, actor and critic weights from checkpoints."""
        self.FeatureExtractor.load_state_dict(
            torch.load(self._get_model_path(self.FeatureExtractor.name))
        )
        self.actor.load_state_dict(torch.load(self._get_model_path("actor")))
        self.critic.load_state_dict(torch.load(self._get_model_path("critic")))

    def load_featureExtractor(self):
        """Restore only the feature-extractor weights from its checkpoint."""
        self.FeatureExtractor.load_state_dict(
            torch.load(self._get_model_path(self.FeatureExtractor.name))
        )

    def _get_model_path(self, module_name: str) -> str:
        """Checkpoint path for one sub-module of this agent."""
        return os.path.join(self.model_dir, f"{self.name}_{module_name}.pth")


if __name__ == "__main__":
    cfg = Config(yaml_path="./configs/config_mini.yaml")

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    config_env = cfg.env
    env = MyFuturesEnv(config_env)
    # Build a throwaway config class from the patchtst config dict.
    patchtst_config = type("PatchTSTConfig", (object,), cfg.patchtst)
    patchtst = PatchTST(config=patchtst_config).to(cfg.patchtst["device"])

    agent = ActorCritic(
        # state_dim: futures x window x (price channel + tech channels)
        env.get_future_count() * cfg.patchtst["window"] * (env.get_tech_num() + 1),
        cfg.agent["hidden_dim"],
        env.action_dim,
        cfg.agent["actor_lr"],
        cfg.agent["critic_lr"],
        cfg.agent["gamma"],
        cfg.agent["device"],
        env,
        patchtst,
        cfg.patchtst["lr"],
        # Fix: FeatureExtractor_freeze_epochs was previously omitted, which
        # raised TypeError at construction. 0 = no warm-up freeze.
        0,
    )

    num_episodes = 1000

    return_list = []
    for i_episode in range(int(num_episodes / 10)):
        episode_return = 0
        transition_dict = {
            "states": [],
            "actions": [],
            "next_states": [],
            "rewards": [],
            "dones": [],
        }
        state = env.reset()
        done = False
        while not done:
            action = agent.take_action(state)
            next_state, reward, done, _ = env.step(action)
            transition_dict["states"].append(state)
            transition_dict["actions"].append(action)
            transition_dict["next_states"].append(next_state)
            transition_dict["rewards"].append(reward)
            transition_dict["dones"].append(done)
            state = next_state
            episode_return += reward
        return_list.append(episode_return)
        # Fix: update() requires the current epoch for the freeze schedule;
        # it was previously called without it, raising TypeError.
        agent.update(transition_dict, i_episode)
        print("epoch:", i_episode)

    episodes_list = list(range(len(return_list)))
    plt.plot(episodes_list, return_list)
    plt.xlabel("Episodes")
    plt.ylabel("Returns")
    plt.title("Actor-Critic on {}".format(env.env_name))
    plt.show()

    mv_return = rl_utils.moving_average(return_list, 9)
    plt.plot(episodes_list, mv_return)
    plt.xlabel("Episodes")
    plt.ylabel("Returns")
    plt.title("Actor-Critic on {}".format(env.env_name))
    plt.show()