import torch.nn as nn
import torch.nn.functional as F
import torch
from torch_geometric.nn import GATConv
from torch_geometric.utils import dense_to_sparse
import utils.utils as utils


class Gate(nn.Module):
    """Temperature-scaled softmax gate.

    Projects the input to ``d_output`` logits, applies a softmax with
    temperature ``beta``, and rescales by ``d_output`` so the gated
    weights are non-negative and sum to ``d_output`` along the last axis.
    """

    def __init__(self, d_input, d_output, beta=1.0):
        super().__init__()
        self.trans = nn.Linear(d_input, d_output)
        self.d_output = d_output
        self.t = beta

    def forward(self, gate_input):
        # Softmax over temperature-scaled logits, then rescale.
        logits = self.trans(gate_input) / self.t
        weights = torch.softmax(logits, dim=-1)
        return weights * self.d_output


class EnhancedCombinedModel(nn.Module):
    """Gated wrapper around graph / temporal / decision sub-models.

    A ``Gate`` reweights the raw per-step features, then the original
    graph -> temporal -> decision pipeline runs unchanged.
    """

    def __init__(self, graph_model, dt_model, dh_model, input_dim, gate_dim, beta=1.0):
        super().__init__()
        self.graph_model = graph_model
        self.dt_model = dt_model
        self.dh_model = dh_model
        # Gating layer applied to the raw input features.
        self.gate = Gate(input_dim, gate_dim, beta)

    def forward(self, x, matrix):
        """
        x: [B, N, L, C] input features
        matrix: [B, N, N] adjacency matrix
        """
        B, N, L, C = x.shape

        # 1. Gate the features: flatten to [B*N*L, C], weight, restore
        #    to [B, N, L, gate_dim].
        gated = self.gate(x.reshape(B * N * L, C))
        x = gated.reshape(B, N, L, -1)

        # 2. Original pipeline, untouched.
        x = self.graph_model(x, matrix)
        b, f, l, c = x.shape
        x = self.dt_model(x.view(b * f, l, c)).view(b, f, l, c)
        output, loss_gp = self.dh_model(x, matrix)
        return output, loss_gp


# 带有时间距离的attention结构
# Attention layer with a time-distance (ASP) prior.
class ALSPAttentionLayer(nn.Module):
    """Multi-head self-attention whose raw scores are modulated by a fixed
    distance-decay matrix, so nearby time steps interact more strongly.

    NOTE(review): the cached ASP buffer is (5000, 5000) float32 (~100 MB)
    and is persisted in the state_dict — confirm this is intended.
    """

    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super(ALSPAttentionLayer, self).__init__()
        self.num_heads = num_heads
        self.embed_dim = embed_dim
        self.head_dim = embed_dim // num_heads
        assert embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"

        # Q, K, V projections plus the output projection.
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)

        self.dropout = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(embed_dim)

        # Fixed ASP correlation matrix, precomputed up to max_len positions.
        self.register_buffer("asp_mask", self.build_asp_matrix())

    def build_asp_matrix(self, max_len=5000):
        """Build the (max_len, max_len) ASP matrix with weight
        1 / (1 + |i - j|), decaying with temporal distance."""
        positions = torch.arange(max_len)
        gap = torch.abs(positions[:, None] - positions[None, :]).float()
        return 1.0 / (1.0 + gap)

    def forward(self, x, mask=None):
        B, L, _ = x.shape

        def split_heads(t):
            # (B, L, D) -> (B, h, L, d)
            return t.view(B, L, self.num_heads, self.head_dim).transpose(1, 2)

        q = split_heads(self.q_proj(x))
        k = split_heads(self.k_proj(x))
        v = split_heads(self.v_proj(x))

        # Scaled dot-product scores, shape (B, h, L, L).
        scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim**0.5)
        # Modulate by the distance-decay prior sliced to the current length.
        scores = scores * self.asp_mask[:L, :L]

        if mask is not None:
            # mask is assumed (B, L); zero entries are masked out — TODO confirm.
            scores = scores.masked_fill(mask[:, None, None, :] == 0, float("-inf"))

        weights = self.dropout(F.softmax(scores, dim=-1))
        context = torch.matmul(weights, v)  # (B, h, L, d)
        context = context.transpose(1, 2).contiguous().view(B, L, self.embed_dim)
        return self.norm1(self.out_proj(context))


class DilatedSelfAttention(nn.Module):
    """Causal self-attention with a dilated receptive field.

    Position ``i`` may only attend to earlier positions ``j`` with
    ``(i - j) % dilation == 0`` — an attention analogue of dilated
    convolution.

    Args:
        d_model: feature dimension D.
        dilation: stride between attended positions.
        max_len: maximum supported sequence length for the cached mask
            (default 1000, matching the previous hard-coded value).
    """

    def __init__(self, d_model, dilation=1, max_len=1000):
        super().__init__()
        self.d_model = d_model
        self.dilation = dilation
        self.q_proj = nn.Linear(d_model, d_model)
        self.k_proj = nn.Linear(d_model, d_model)
        self.v_proj = nn.Linear(d_model, d_model)
        self.out_proj = nn.Linear(d_model, d_model)

        # Vectorized mask build: the previous O(max_len^2) Python double
        # loop (1M iterations) is replaced by an equivalent broadcast.
        # mask[i, j] is True iff j <= i and (i - j) % dilation == 0.
        offset = torch.arange(max_len)[:, None] - torch.arange(max_len)[None, :]
        mask = (offset >= 0) & (offset % dilation == 0)
        self.register_buffer("mask", mask)  # buffer so it follows .to(device)

    def forward(self, x):
        """
        x: [B, L, D] -> [B, L, D]

        Raises:
            ValueError: if L exceeds the precomputed mask size (the old
                code failed with an opaque broadcast error instead).
        """
        B, L, D = x.shape
        if L > self.mask.size(0):
            raise ValueError(
                f"sequence length {L} exceeds supported max_len {self.mask.size(0)}"
            )

        Q = self.q_proj(x)  # [B, L, D]
        K = self.k_proj(x)  # [B, L, D]
        V = self.v_proj(x)  # [B, L, D]

        # Slice the cached mask to the actual sequence length.
        mask = self.mask[:L, :L]  # [L, L]
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / (D**0.5)  # [B, L, L]
        attn_scores = attn_scores.masked_fill(~mask, float("-inf"))
        attn_weights = F.softmax(attn_scores, dim=-1)
        out = torch.matmul(attn_weights, V)  # [B, L, D]
        return self.out_proj(out)


class GeGLU_FFN(nn.Module):
    """Position-wise feed-forward block with a GeGLU gate.

    The hidden width is scaled by 2/3 so the parameter count roughly
    matches a plain two-layer FFN of width ``dim_feedforward``.
    """

    def __init__(self, d_model, dim_feedforward):
        super().__init__()
        hidden_dim = int(dim_feedforward * 2 / 3)
        self.W1 = nn.Linear(d_model, hidden_dim)
        self.W_gate = nn.Linear(d_model, hidden_dim)
        self.W2 = nn.Linear(hidden_dim, d_model)

    def forward(self, x):
        # Value path times GELU-activated gate path, then project back.
        return self.W2(self.W1(x) * F.gelu(self.W_gate(x)))


# 有着类似空洞卷积的attention结构
class DilatedTransformerLayer(nn.Module):
    def __init__(self, d_model, dim_feedforward=256, dropout=0.1, dilation=1):
        super().__init__()
        self.attn = DilatedSelfAttention(d_model, dilation)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)

        self.ff = GeGLU_FFN(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Self-attention + 残差
        x = x + self.dropout(self.attn(self.norm1(x)))
        # FFN + 残差
        x = x + self.dropout(self.ff(self.norm2(x)))
        return x


class DilatedTransformerEncoder(nn.Module):
    """Stack of three dilated transformer layers (dilation 1, 2, 3), each
    followed by an ASP attention layer, applied sequentially."""

    def __init__(self, embed_dim, num_heads, ff_dim, dropout=0.1):
        super(DilatedTransformerEncoder, self).__init__()
        # Interleave one dilated layer and one ASP layer per dilation rate;
        # module order matches the original hand-written list, so state_dict
        # keys are unchanged.
        layers = []
        for dilation in (1, 2, 3):
            layers.append(
                DilatedTransformerLayer(
                    d_model=embed_dim,
                    dim_feedforward=ff_dim,
                    dropout=dropout,
                    dilation=dilation,
                )
            )
            layers.append(ALSPAttentionLayer(embed_dim, num_heads, dropout=dropout))
        self.layers = nn.ModuleList(layers)

    def forward(self, x, mask=None):
        # NOTE(review): ``mask`` is accepted but not forwarded to any layer.
        for layer in self.layers:
            x = layer(x)
        return x


class GEGLU(nn.Module):
    """GEGLU activation layer.

    Projects the input to twice the output width, splits the result in
    half, GELU-activates the first half as a gate, and multiplies it
    element-wise with the second (linear) half.
    """

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.out_features = out_features
        self.proj = nn.Linear(in_features, out_features * 2)
        self.gelu = nn.GELU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate, linear = self.proj(x).chunk(2, dim=-1)
        return self.gelu(gate) * linear


class EmbeddingHead(nn.Module):
    """Representation head: (B, N, L, C) -> (B, N, C).

    Mean-pools over the time axis, then refines with a GEGLU MLP
    (GEGLU replaced the former ReLU-MLP for stability).
    """

    def __init__(self, input_channels: int, hidden_dim: int = 64):
        super().__init__()
        self.mlp = nn.Sequential(
            GEGLU(input_channels, hidden_dim),
            nn.Linear(hidden_dim, input_channels),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Average over the time dimension, then project back to C channels.
        pooled = x.mean(dim=-2)  # (B, N, C)
        return self.mlp(pooled)  # (B, N, C)


class DecisionHead(nn.Module):
    """
    Decision head: takes (B, N, C) embeddings, outputs (B, N, 1) scores
    plus an optional supervised-contrastive graph regularization loss.
    """

    def __init__(self, input_channels: int, hidden_dim: int = 64):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(input_channels, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, int(hidden_dim / 2)),
            nn.ReLU(),
            nn.Linear(int(hidden_dim / 2), 1),
            # nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor, adj_matrix: torch.Tensor = None):
        """
        x: (B, N, C) node embeddings
        adj_matrix: adjacency matrix used for the graph regularization;
            when None the regularization is skipped and loss_gp is None.

        Returns:
            (output, loss_gp): output is (B, N, 1); loss_gp is a scalar
            tensor or None.
        """
        loss_gp = None
        if adj_matrix is not None:
            loss_gp = self.calculate_gp_loss(x, adj_matrix)

        output = self.mlp(x)  # (B, N, 1)
        return output, loss_gp

    def calculate_gp_loss(
        self, embeddings: torch.Tensor, adj_matrix: torch.Tensor
    ) -> torch.Tensor:
        """
        SupCon-style graph regularization: adjacent nodes (off-diagonal
        positives from adj_matrix) should have high relative similarity.

        embeddings: (B, N, C)
        adj_matrix: (N, N) or (B, N, N) adjacency matrix (broadcastable
            against the (B, N, N) similarity matrix)
        """
        B, N, C = embeddings.shape
        similarity_matrix = torch.bmm(embeddings, embeddings.transpose(1, 2))

        # Positives are adjacency entries with the self-pairs removed.
        positive_mask = adj_matrix.clone()
        positive_mask.diagonal(dim1=-2, dim2=-1).zero_()

        # Subtract the per-row max for numerical stability (standard
        # log-sum-exp trick); logits <= 0 along the last dim.
        logits = (
            similarity_matrix
            - torch.max(similarity_matrix, dim=-1, keepdim=True)[0].detach()
        )
        exp_logits = torch.exp(logits)

        # Exclude self-similarity from the denominator.
        identity_mask = torch.eye(N, device=embeddings.device).expand(B, N, N)
        exp_logits_no_diag = torch.where(
            identity_mask == 1, torch.zeros_like(exp_logits), exp_logits
        )

        # BUG FIX: the denominator must sum over the same axis the per-row
        # max was taken on (the last dim). The previous .sum(1, ...) mixed
        # rows with different max offsets and silently broadcast a
        # (B, 1, N) denominator against (B, N, N) logits.
        log_prob = logits - torch.log(exp_logits_no_diag.sum(-1, keepdim=True) + 1e-8)
        mean_log_prob_pos = (positive_mask * log_prob).sum() / (
            positive_mask.sum() + 1e-8
        )
        loss = -mean_log_prob_pos
        return loss


class StockPoolSpatioTemporalSmoothing(nn.Module):
    """
    Spatio-temporal smoothing for a batch of stock pools (vectorized).

    Input:
        - feature tensor x_batch: [Batch_Size, Stock_Num, L, C]
        - adjacency matrix metric_batch: [Batch_Size, Stock_Num, Stock_Num]
    Output:
        - smoothed feature tensor: [Batch_Size, Stock_Num, L, C]

    NOTE(review): the reshapes in forward() assume in_channels ==
    hidden_channels == out_channels == C; otherwise the GRU input/hidden
    sizes and the final [B, N, L, C] reshape would not line up — confirm
    with callers.
    """

    def __init__(self, in_channels, hidden_channels, out_channels):
        super(StockPoolSpatioTemporalSmoothing, self).__init__()
        self.gat_layer = GATConv(
            in_channels=in_channels,
            out_channels=hidden_channels,
            heads=1,  # single-head attention keeps the channel dim unchanged
            concat=False,
        )
        self.gru_cell = nn.GRUCell(input_size=hidden_channels, hidden_size=out_channels)

    def forward(self, x_batch, metric_batch):
        """
        Vectorized forward pass: GAT smoothing across stocks at each time
        step, with a GRU cell carrying state across time steps.

        Args:
            x_batch (torch.Tensor): features, shape [B, N, L, C]
            metric_batch (torch.Tensor): adjacency, shape [B, N, N]
        Returns:
            final_output (torch.Tensor): [B, N, L, C]
        """
        B, N, L, C = x_batch.shape
        # Presumably dense_to_sparse on a batched (B, N, N) input yields a
        # block-diagonal edge_index over B*N nodes, matching the B*N
        # flattening below — TODO(review): confirm the installed
        # torch_geometric version supports batched input here.
        edge_index, edge_attr = dense_to_sparse(metric_batch)
        # Time-major layout: [L, B*N, C], so each step is one flat node set.
        x_batch_reshaped = x_batch.permute(2, 0, 1, 3).reshape(L, B * N, C)
        # Zero initial GRU hidden state for all B*N nodes.
        h = torch.zeros(B * N, C, device=x_batch.device)
        outputs = []

        # Loop over the time dimension L
        for t in range(L):
            # All data for time step t, shape [B*N, C]
            x_t = x_batch_reshaped[t]
            # Spatial smoothing via GAT; gat_output has shape [B*N, C]
            gat_output = self.gat_layer(x_t, edge_index)
            # Temporal smoothing: update the recurrent state
            h = self.gru_cell(gat_output, h)

            outputs.append(h)

        # stacked_outputs has shape [L, B*N, C]
        stacked_outputs = torch.stack(outputs, dim=0)
        # Restore the original [B, N, L, C] layout
        final_output = stacked_outputs.reshape(L, B, N, C).permute(1, 2, 0, 3)
        return final_output


class CombinedModel(nn.Module):
    """End-to-end model: per-series normalization, graph smoothing with a
    residual, dilated transformer encoding, then either an embedding
    output (mode "rl") or a decision-head prediction plus graph loss
    (mode "dl")."""

    def __init__(self, input_dim, hidden_dim, num_heads):
        super().__init__()
        self.graph_model = StockPoolSpatioTemporalSmoothing(
            input_dim, hidden_dim, input_dim
        )

        self.dt_model = DilatedTransformerEncoder(input_dim, num_heads, hidden_dim)
        self.eb_model = EmbeddingHead(input_dim, hidden_dim)
        self.dh_model = DecisionHead(input_dim, hidden_dim)

        # Learnable scalar applied to the final prediction.
        self.volatility_scaler = nn.Parameter(torch.tensor([1.0]))

    def forward(self, x, matrix=None, mode="dl"):
        # Standardize each feature series along the time axis.
        mean = x.mean(dim=-2, keepdim=True)
        std = x.std(dim=-2, keepdim=True)
        x = (x - mean) / (std + 1e-8)
        if matrix is None:
            # Derive the adjacency mask from the data when not provided.
            matrix = utils.batch_calculate_dtw_adjacency_mask(x)

        # Graph smoothing with a residual connection; x is [b, f, l, c].
        x = x + self.graph_model(x, matrix)
        b, f, l, c = x.shape
        x = self.dt_model(x.view(b * f, l, c)).view(b, f, l, c)

        if mode == "rl":
            return torch.sigmoid(self.eb_model(x))
        if mode == "dl":
            embeddings = self.eb_model(x)
            pred, loss_gp = self.dh_model(embeddings, matrix)
            return pred * self.volatility_scaler, loss_gp
        raise ValueError("Mode must be either 'rl' or 'dl'")
