import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import utils.utils as utils


class PolicyNet(nn.Module):
    """Per-instrument policy head over patch-encoded market features.

    A two-stage extractor (per-patch, then cross-patch) produces a pair of
    global vectors; the first drives a dynamic per-instrument weight
    generator, the second is a shared representation modulated by those
    weights before a common decision head scores the actions.
    """

    def __init__(self, nf, n_var, hidden_dim, future_count, action_dim=7):
        super().__init__()
        self.nf = nf
        self.n_var = n_var
        self.future_count = future_count

        # Stages 0-2 operate per patch; stages 3-5 aggregate across patches.
        # forward() slices this Sequential into those two halves, so the
        # layer order here is load-bearing.
        self.struct_processor = nn.Sequential(
            nn.LayerNorm(n_var),
            nn.Linear(n_var, hidden_dim),
            nn.GELU(),
            # cross-patch aggregation: nf * hidden -> two hidden-sized vectors
            nn.Linear(hidden_dim * nf, hidden_dim * 2),
            nn.Unflatten(-1, (2, hidden_dim)),
            nn.GELU(),
        )

        # Produces one hidden-sized weight vector per instrument.
        self.weight_generator = nn.Linear(hidden_dim, future_count * hidden_dim)

        # Decision head shared by every instrument; Sigmoid bounds scores to (0, 1).
        self.base_policy = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.LayerNorm(hidden_dim // 2),
            nn.GELU(),
            nn.Linear(hidden_dim // 2, action_dim),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Map patch features to per-instrument action scores.

        Args:
            x: tensor of shape [batch, nf, n_var].

        Returns:
            Tensor of shape [batch, future_count, action_dim] in (0, 1).
        """
        bsz = x.size(0)
        local_stage = self.struct_processor[:3]
        global_stage = self.struct_processor[3:]

        # Stage 1: per-patch encoding -> [B, nf, hidden_dim]
        per_patch = local_stage(x)

        # Stage 2: collapse patches and split into two global vectors
        # -> [B, 2, hidden_dim]
        pooled = global_stage(per_patch.reshape(bsz, -1))

        # Stage 3: first global vector -> per-instrument weights
        dyn_weights = self.weight_generator(pooled[:, 0])
        dyn_weights = dyn_weights.view(bsz, self.future_count, -1)

        # Stage 4: modulate the shared second vector (broadcast over instruments)
        fused = dyn_weights * pooled[:, 1:2]  # [B, future_count, hidden_dim]

        # Stage 5: shared decision head
        return self.base_policy(fused)


class ValueNet(nn.Module):
    """State-value network V(s) over per-instrument patch features.

    Each instrument's patches are encoded and aggregated into a global
    vector; instrument vectors are mean-pooled into a single state
    representation before the scalar value head.
    """

    def __init__(self, nf, n_var, hidden_dim, future_count):
        super().__init__()
        self.nf = nf
        self.n_var = n_var
        self.future_count = future_count
        self.hidden_dim = hidden_dim

        # Shared per-patch feature extractor.
        self.patch_processor = nn.Sequential(
            nn.LayerNorm(n_var), nn.Linear(n_var, hidden_dim), nn.GELU()
        )

        # Cross-patch aggregation: yields two hidden-sized global vectors
        # per instrument (only the first is used in forward()).
        self.global_aggregator = nn.Sequential(
            nn.Linear(hidden_dim * nf, hidden_dim * 2),
            nn.Unflatten(-1, (2, hidden_dim)),
            nn.GELU(),
        )

        # Scalar value head applied to the mean-pooled state representation.
        self.value_head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, 1)
        )

    def forward(self, x):
        """Estimate the state value.

        Args:
            x: tensor of shape [B, n_fut, nf, n_var] (batch, instruments,
               patches, variables).

        Returns:
            V(s) tensor of shape [B, 1].
        """
        # Renamed the unpacked dim from `F` to `n_fut`: `F` would shadow the
        # module-level `torch.nn.functional as F` import inside this method.
        B, n_fut, nf, n_var = x.shape
        x = x.view(B * n_fut, nf, n_var)  # merge batch and instrument dims

        # 1. Per-patch features -> [B*n_fut, nf, hidden_dim]
        patch_features = self.patch_processor(x)

        # 2. Aggregate to instrument-level global features -> [B*n_fut, 2, hidden_dim]
        global_features = self.global_aggregator(
            patch_features.flatten(start_dim=1)
        )

        # 3. First global vector is the instrument representation.
        future_repr = global_features[:, 0]  # [B*n_fut, hidden_dim]

        # 4. Restore batch layout -> [B, n_fut, hidden_dim]
        future_repr = future_repr.view(B, n_fut, self.hidden_dim)

        # 5. Mean-pool across instruments -> [B, hidden_dim]
        state_repr = future_repr.mean(dim=1)

        # 6. Scalar state value -> [B, 1]
        return self.value_head(state_repr)


class ActorCritic_sample:
    """Actor-critic agent with a shared feature extractor.

    Holds a PolicyNet actor, a ValueNet critic, and an external
    FeatureExtractor (trained jointly after a freeze period). Actions are
    sampled per instrument from a 7-way categorical and mapped to the
    continuous range [-1, 1].
    """

    def __init__(
        self,
        nf,
        n_var,
        hidden_dim,
        future_count,
        actor_lr,
        critic_lr,
        gamma,
        device,
        env,
        FeatureExtractor,
        FeatureExtractor_lr,
        FeatureExtractor_freeze_epochs,
        epsilon=0.2,
    ):
        # Policy network (state features only; no account amount input).
        self.actor = PolicyNet(nf, n_var, hidden_dim, future_count).to(device)
        # Value network (state features only).
        self.critic = ValueNet(nf, n_var, hidden_dim, future_count).to(device)

        # Optimizers (one per trainable module).
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)

        self.gamma = gamma
        self.device = device
        self.env = env
        self.FeatureExtractor = FeatureExtractor
        self.FeatureExtractor_optimizer = torch.optim.Adam(
            self.FeatureExtractor.parameters(), lr=FeatureExtractor_lr
        )
        # Epochs during which the feature extractor (and actor) stay frozen in update().
        self.FeatureExtractor_freeze_epochs = FeatureExtractor_freeze_epochs
        self.model_dir = "./checkpoints"
        os.makedirs(self.model_dir, exist_ok=True)
        self.name = "ActorCritic_sample_Agent"
        self.epsilon = epsilon
        # "pretrain" mixes uniform exploration noise into the policy; "rl" does not.
        self.mode = "pretrain"

    def take_action(self, state):
        """Sample one action per instrument for the given raw state.

        Returns a tensor of shape [future_count] with values in [-1, 1];
        falls back to all-zero (neutral) actions on any failure.
        """
        try:
            # 1. State pre-processing (wrapped so any failure yields a fallback).
            state_blc = self.process_state_to_blc(
                state,
                self.env.get_window(),
                self.env.get_future_count(),
                self.env.get_tech_num(),
            ).to(self.device)
            state_blc = utils.process_blc_features(state_blc)
            # Reject states containing NaN/Inf early.
            if torch.isnan(state_blc).any() or torch.isinf(state_blc).any():
                raise ValueError("Invalid state values detected in state_blc")

            # 2. Feature extraction (no gradients needed at action time).
            with torch.no_grad():
                state_encoded = self.FeatureExtractor(state_blc)  # [batch, nf, n_var]

                # Zero-fallback if the extractor produced NaN.
                if torch.isnan(state_encoded).any():
                    print("Warning: NaN in state_encoded, using zeros as fallback")
                    state_encoded = torch.zeros_like(state_encoded)

            # 3. Action score computation with numerical-stability guards.
            # NOTE(review): the actor ends in Sigmoid, so these are bounded
            # scores rather than raw logits; softmax below renormalizes them.
            raw_logits = self.actor(state_encoded)  # [batch, future_num, action_dim]

            raw_logits = torch.nan_to_num(
                raw_logits, nan=0.0, posinf=10.0, neginf=-10.0
            )
            action_probs = torch.softmax(
                raw_logits.clamp(-10, 10), dim=-1
            )  # clamp keeps softmax inputs in a safe range

            # Probability-sum guard. Compare against a tensor on the same
            # device: the previous CPU scalar raised on CUDA inputs.
            one = torch.tensor(1.0, device=action_probs.device)
            if not torch.allclose(action_probs.sum(-1), one, atol=1e-3):
                print("Warning: Invalid probability sum, using uniform distribution")
                action_probs = torch.ones_like(action_probs) / action_probs.shape[-1]

            # 4. Exploration noise during pretraining (epsilon-weighted mix).
            if self.mode == "pretrain":
                noise = torch.rand_like(action_probs) * self.epsilon
                action_probs = action_probs * (1 - self.epsilon) + noise
                action_probs = action_probs / action_probs.sum(
                    -1, keepdim=True
                )  # renormalize after mixing

            elif self.mode == "rl":
                pass

            # 5. Per-instrument sampling with a neutral-action fallback.
            sampled_actions = []
            for i in range(self.actor.future_count):
                try:
                    # Clamp avoids zero-probability rows breaking Categorical.
                    dist = torch.distributions.Categorical(
                        probs=action_probs[0, i, :].clamp(min=1e-10)
                    )
                    action = dist.sample()
                    # Map discrete {0..6} to continuous [-1, 1] in steps of 1/3.
                    mapped_action = (action.float() / 3 - 1).clamp(-1, 1)
                except Exception as e:
                    print(f"Sampling failed: {e}, using neutral action")
                    mapped_action = torch.tensor(0.0, device=self.device)

                sampled_actions.append(mapped_action)

            return torch.stack(sampled_actions)

        except Exception as e:
            print(f"Critical error in take_action: {e}")
            # Neutral (all-zero) action as the final fallback.
            return torch.zeros(self.actor.future_count, device=self.device)

    def update(self, transition_dict, epoch):
        """Run one actor-critic update from a batch of transitions.

        Args:
            transition_dict: dict with "states", "next_states", "actions",
                "rewards", "dones" lists.
            epoch: current epoch; the feature extractor and actor only step
                once `epoch >= FeatureExtractor_freeze_epochs`.

        Returns:
            Dict of scalar diagnostics (actor/critic loss, mean value).
        """
        # Environment layout parameters.
        window = self.env.get_window()
        future_count = self.env.get_future_count()
        tech_num = self.env.get_tech_num()

        # ----------------------
        # Encode states (price/tech blocks only)
        # ----------------------
        states_blc = torch.stack(
            [
                self.process_state_to_blc(s, window, future_count, tech_num)
                for s in transition_dict["states"]
            ]
        ).to(
            self.device
        )  # [B, b, l, c] = [batch, future_count, window, 1 + tech_num]

        B, b, l, c = states_blc.shape
        states_flat = states_blc.view(B * b, l, c)
        states_encoded = self.FeatureExtractor(states_flat).reshape(B, b, -1, c)
        # NOTE(review): states_encoded is 4-D here, while PolicyNet.forward
        # documents a 3-D [batch, nf, n_var] input — confirm the intended
        # actor input layout against the FeatureExtractor's output shape.

        # ----------------------
        # Encode next_states the same way
        # ----------------------
        next_states_blc = torch.stack(
            [
                self.process_state_to_blc(s, window, future_count, tech_num)
                for s in transition_dict["next_states"]
            ]
        ).to(self.device)

        next_states_flat = next_states_blc.view(B * b, l, c)
        next_states_encoded = self.FeatureExtractor(next_states_flat).reshape(
            B, b, -1, c
        )

        # ----------------------
        # Remaining transition data
        # ----------------------
        rewards = (
            torch.tensor(transition_dict["rewards"], dtype=torch.float)
            .view(-1, 1)
            .to(self.device)
        )
        dones = (
            torch.tensor(transition_dict["dones"], dtype=torch.float)
            .view(-1, 1)
            .to(self.device)
        )

        # ----------------------
        # Critic update: one-step TD target
        # ----------------------
        with torch.no_grad():
            next_v = self.critic(next_states_encoded)  # bootstrap value of s'
            target_v = rewards + self.gamma * next_v * (1 - dones)

        current_v = self.critic(states_encoded)
        critic_loss = F.mse_loss(current_v, target_v)
        td_delta = target_v - current_v  # TD error, used as advantage estimate

        # ----------------------
        # Actor update
        # ----------------------
        continuous_actions = torch.stack(transition_dict["actions"]).to(
            self.device
        )  # [B, future_count]
        # Inverse of the take_action mapping a_cont = a_disc / 3 - 1.
        original_actions = (
            ((continuous_actions + 1) * 3).round().long().clamp(0, 6)
        )

        # Per-action probabilities for the taken (re-discretized) actions.
        action_probs = self.actor(states_encoded)  # [B, future_count, action_dim=7]
        action_probs_flat = action_probs.view(
            -1, action_probs.size(-1)
        )  # [B*future_count, 7]

        # Clamp before log: a zero probability would otherwise give -inf.
        log_probs = torch.log(
            action_probs_flat.gather(1, original_actions.view(-1, 1)).clamp(min=1e-10)
        )

        entropy = -torch.sum(
            action_probs * torch.log(action_probs + 1e-10), dim=-1
        )  # [B, future_count]

        # Flatten all terms to [B*future_count] before combining. The previous
        # code multiplied a [B*F] vector by a [B*F, 1] column, broadcasting to
        # a [B*F, B*F] matrix, and ADDED the entropy term — which penalizes
        # exploration. The standard policy-gradient loss subtracts an entropy
        # bonus instead.
        advantage = td_delta.detach().repeat_interleave(future_count, dim=0).view(-1)
        actor_loss = torch.mean(
            -log_probs.view(-1) * advantage - 0.01 * entropy.view(-1)
        )

        # ----------------------
        # Backpropagation
        # ----------------------
        self.actor_optimizer.zero_grad()
        self.critic_optimizer.zero_grad()
        self.FeatureExtractor_optimizer.zero_grad()

        # retain_graph: actor_loss.backward() below traverses the same
        # FeatureExtractor graph that critic_loss used.
        critic_loss.backward(retain_graph=True)
        self.critic_optimizer.step()

        actor_loss.backward()
        if epoch >= self.FeatureExtractor_freeze_epochs:
            self.FeatureExtractor_optimizer.step()
            self.actor_optimizer.step()

        return {
            "actor_loss": actor_loss.item(),
            "critic_loss": critic_loss.item(),
            "avg_value": current_v.mean().item(),
        }

    def process_state_to_blc(self, state, window, future_count, tech_num):
        """Reshape a flat state vector into a normalized [future_count, window, 1+tech_num] tensor.

        Layout of `state` (1-D array): [1 + future_count header values,
        window*future_count prices, window*future_count*tech_num indicators].
        """
        offset = 1 + future_count  # skip header (balance + per-instrument fields)
        price_flat = state[offset : offset + window * future_count]
        tech_flat = state[offset + window * future_count :]
        price = price_flat.reshape(window, future_count).T  # [fc, window]
        tech = tech_flat.reshape(window, future_count, tech_num).transpose(1, 0, 2)
        price = price[:, :, np.newaxis]  # [fc, window, 1]
        out = np.concatenate([price, tech], axis=-1)  # [fc, window, 1+tech_num]
        out = torch.tensor(out, dtype=torch.float32)
        return self.norm_blc(out)

    def norm_blc(self, x):
        """Standardize x along dim 1 (the window axis): zero mean, unit variance."""
        means = x.mean(1, keepdim=True).detach()
        x = x - means
        # Small epsilon guards against zero-variance windows.
        stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x /= stdev
        return x

    def save_model(self):
        """Save feature extractor, actor and critic weights under model_dir."""
        torch.save(
            self.FeatureExtractor.state_dict(),
            self._get_model_path(self.FeatureExtractor.name),
        )
        torch.save(self.actor.state_dict(), self._get_model_path("actor"))
        torch.save(self.critic.state_dict(), self._get_model_path("critic"))

    def load_model(self):
        """Load feature extractor, actor and critic weights from model_dir."""
        self.FeatureExtractor.load_state_dict(
            torch.load(self._get_model_path(self.FeatureExtractor.name))
        )
        self.actor.load_state_dict(torch.load(self._get_model_path("actor")))
        self.critic.load_state_dict(torch.load(self._get_model_path("critic")))

    def _get_model_path(self, module_name: str):
        """Checkpoint path for one named sub-module of this agent."""
        return os.path.join(self.model_dir, f"{self.name}_{module_name}.pth")

    def set_pretrain(self):
        """Enable pretrain mode (epsilon exploration noise in take_action)."""
        self.mode = "pretrain"

    def set_rl(self):
        """Enable pure RL mode (no exploration noise in take_action)."""
        self.mode = "rl"
