import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class ExpertCollaboration(nn.Module):
    """专家协作层 - 使用交叉注意力机制让专家输出相互交互"""

    def __init__(self, expert_dim, num_experts, num_heads=4):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = expert_dim // num_heads
        assert expert_dim % num_heads == 0, "专家维度必须能被注意力头数整除"

        # 多头注意力投影
        self.query_proj = nn.Linear(expert_dim, expert_dim)
        self.key_proj = nn.Linear(expert_dim, expert_dim)
        self.value_proj = nn.Linear(expert_dim, expert_dim)
        self.output_proj = nn.Linear(expert_dim, expert_dim)

        self.scale = math.sqrt(self.head_dim)
        self.layer_norm = nn.LayerNorm(expert_dim)

    def forward(self, expert_outputs):
        # expert_outputs: [batch_size, num_experts, expert_dim]
        batch_size, num_experts, expert_dim = expert_outputs.shape

        # 多头注意力计算
        # 形状变换为 [batch_size, num_experts, num_heads, head_dim]
        queries = self.query_proj(expert_outputs).view(batch_size, num_experts, self.num_heads, self.head_dim)
        keys = self.key_proj(expert_outputs).view(batch_size, num_experts, self.num_heads, self.head_dim)
        values = self.value_proj(expert_outputs).view(batch_size, num_experts, self.num_heads, self.head_dim)

        # 转置以便进行批量矩阵乘法 [batch_size, num_heads, num_experts, head_dim]
        queries = queries.transpose(1, 2)
        keys = keys.transpose(1, 2)
        values = values.transpose(1, 2)

        # 注意力分数计算
        scores = torch.matmul(queries,
                              keys.transpose(2, 3)) / self.scale  # [batch_size, num_heads, num_experts, num_experts]
        attention_weights = F.softmax(scores, dim=-1)

        # 加权聚合
        context = torch.matmul(attention_weights, values)  # [batch_size, num_heads, num_experts, head_dim]

        # 转置回原始形状并合并多头
        context = context.transpose(1, 2).contiguous().view(batch_size, num_experts, expert_dim)

        # 输出投影
        output = self.output_proj(context)

        # 残差连接和层归一化
        collab_outputs = self.layer_norm(expert_outputs + output)

        return collab_outputs


# Expert networks of different architectures (MLPExpert, ResidualExpert, etc.)
class MLPExpert(nn.Module):
    """Standard MLP expert: BatchNorm -> Linear(in, 2*out) -> ReLU -> Linear(2*out, out) -> ReLU."""

    def __init__(self, input_size, output_size):
        super().__init__()
        hidden = output_size * 2
        layers = [
            nn.BatchNorm1d(input_size),
            nn.Linear(input_size, hidden),
            nn.ReLU(),
            nn.Linear(hidden, output_size),
            nn.ReLU(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # [batch, input_size] -> [batch, output_size]
        return self.net(x)


class ResidualExpert(nn.Module):
    """带残差连接的专家"""

    def __init__(self, input_size, output_size):
        super().__init__()
        self.norm = nn.BatchNorm1d(input_size)
        self.projection = nn.Linear(input_size, output_size)

        self.block1 = nn.Sequential(
            nn.Linear(output_size, output_size),
            nn.ReLU(),
            nn.Linear(output_size, output_size)
        )

        self.block2 = nn.Sequential(
            nn.Linear(output_size, output_size),
            nn.ReLU(),
            nn.Linear(output_size, output_size)
        )

        self.layer_norm1 = nn.LayerNorm(output_size)
        self.layer_norm2 = nn.LayerNorm(output_size)

    def forward(self, x):
        x = self.norm(x)
        x = self.projection(x)

        # 第一个残差块
        residual = x
        x = self.block1(x)
        x = self.layer_norm1(x + residual)

        # 第二个残差块
        residual = x
        x = self.block2(x)
        x = self.layer_norm2(x + residual)

        return x


class AttentionExpert(nn.Module):
    """Self-attention expert.

    Normalizes and projects the input, runs self-attention over a length-1
    "sequence", then applies residual connection, LayerNorm and ReLU.

    NOTE(review): the sequence length is exactly 1 (the whole feature vector
    is one token), so the softmax over a 1x1 score matrix is always 1.0 and
    `context` equals the value projection — the query/key branches have no
    effect on the output. Confirm this degenerate attention is intentional.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        self.norm = nn.BatchNorm1d(input_size)
        self.projection = nn.Linear(input_size, output_size)

        # Self-attention projections.
        self.query = nn.Linear(output_size, output_size)
        self.key = nn.Linear(output_size, output_size)
        self.value = nn.Linear(output_size, output_size)
        self.scale = math.sqrt(output_size)

        self.output_projection = nn.Linear(output_size, output_size)
        self.layer_norm = nn.LayerNorm(output_size)

    def forward(self, x):
        # NOTE(review): batch_size is never used below.
        batch_size = x.size(0)
        x = self.norm(x)
        x = self.projection(x)

        # Treat each sample's feature vector as a single attention token:
        # reshape to [batch_size, 1, output_size].
        x_reshaped = x.unsqueeze(1)

        q = self.query(x_reshaped)
        k = self.key(x_reshaped)
        v = self.value(x_reshaped)

        # Scaled attention scores over the (length-1) token axis.
        attn_scores = torch.bmm(q, k.transpose(1, 2)) / self.scale
        attn_weights = F.softmax(attn_scores, dim=-1)

        # Weighted sum (identical to `v` here, since attn_weights == 1).
        context = torch.bmm(attn_weights, v).squeeze(1)

        # Output projection, residual connection, LayerNorm, ReLU.
        output = self.output_projection(context)
        return F.relu(self.layer_norm(output + x))


class FeatureCrossExpert(nn.Module):
    """Feature-crossing expert in the spirit of DCN (Deep & Cross Network).

    Projects the input into a common space, then applies three explicit
    cross layers of the form

        x_{l+1} = x0 * W_l(x_l) + b_l + x_l

    NOTE(review): canonical DCN uses x0 * (W x_l + b_l) + x_l, i.e. the bias
    sits inside the Hadamard product; here it is added outside. Kept as-is
    to preserve existing behavior.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        self.norm = nn.BatchNorm1d(input_size)

        # Map raw features into the crossing space.
        self.projection = nn.Linear(input_size, output_size)

        # One bias-free weight matrix and one bias vector per cross layer.
        self.cross_w = nn.ModuleList([
            nn.Linear(output_size, output_size, bias=False) for _ in range(3)
        ])
        self.cross_b = nn.ParameterList([
            nn.Parameter(torch.zeros(output_size)) for _ in range(3)
        ])

        # Final non-linear transform.
        self.output_layer = nn.Sequential(
            nn.Linear(output_size, output_size),
            nn.ReLU()
        )

    def forward(self, x):
        x = self.norm(x)
        x0 = self.projection(x)
        x_cross = x0

        # Three explicit crossing steps.
        for w, b in zip(self.cross_w, self.cross_b):
            # Calling the Linear module directly replaces the original
            # manual `matmul(x.unsqueeze(1), w.weight.t()).squeeze(1)`,
            # which computed exactly the same bias-free linear map.
            x_cross = x0 * w(x_cross) + b + x_cross

        return self.output_layer(x_cross)


class HeterogeneousMoE(nn.Module):
    """Single-layer heterogeneous mixture-of-experts.

    Each expert has a different architecture; outputs are mixed by a learned
    softmax gate.

    Args:
        input_size: width of the input features.
        output_size: width of each expert's output.
        num_experts: how many experts to instantiate (1..4). Experts are
            drawn in order from [MLPExpert, ResidualExpert, AttentionExpert,
            FeatureCrossExpert], so the default of 2 reproduces the previous
            hard-coded MLP + Residual pair exactly.
        gate_hidden: hidden width of the gating network.
    """

    def __init__(self, input_size, output_size, num_experts=2, gate_hidden=64):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.num_experts = num_experts

        # Gating network. LogSoftmax here + exp in forward() is a
        # numerically stable softmax.
        self.gate = nn.Sequential(
            nn.BatchNorm1d(input_size),
            nn.Linear(input_size, gate_hidden),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(gate_hidden, num_experts),
            nn.LogSoftmax(dim=1)
        )

        # FIX: the expert list was hard-coded to 2 entries regardless of
        # `num_experts`, so any other value desynchronized the gate width
        # from the number of experts. Build the list from `num_experts`.
        expert_factories = [MLPExpert, ResidualExpert, AttentionExpert, FeatureCrossExpert]
        if not 1 <= num_experts <= len(expert_factories):
            raise ValueError(
                f"num_experts must be between 1 and {len(expert_factories)}, got {num_experts}"
            )
        self.experts = nn.ModuleList([
            expert_factories[i](input_size, output_size) for i in range(num_experts)
        ])

    def forward(self, x, return_all_outputs=False):
        """Route `x` through all experts and the gate.

        Args:
            x: [batch, input_size] input features.
            return_all_outputs: when True, return the stacked per-expert
                outputs instead of their gated mixture.

        Returns:
            (stacked_outputs [batch, num_experts, output_size], gate) when
            return_all_outputs, else (mixture [batch, output_size], gate).
        """
        # Sanitize non-finite inputs.
        if torch.isnan(x).any() or torch.isinf(x).any():
            x = torch.nan_to_num(x, nan=0.0, posinf=1.0, neginf=-1.0)

        # Gate weights: exp(log_softmax) == softmax, computed stably.
        log_gate_outputs = self.gate(x)
        gate_outputs = torch.exp(log_gate_outputs)

        # Run every expert on the full batch.
        expert_outputs = [expert(x) for expert in self.experts]

        # Stack to [batch, num_experts, output_size].
        stacked_outputs = torch.stack(expert_outputs, dim=1)

        if return_all_outputs:
            return stacked_outputs, gate_outputs

        # Gate-weighted mixture of the expert outputs.
        final_output = (stacked_outputs * gate_outputs.unsqueeze(-1)).sum(dim=1)

        return final_output, gate_outputs


class CriteoMoEModel(nn.Module):
    """Single-layer heterogeneous-expert MoE model for Criteo CTR prediction.

    Pipeline: embed the categorical fields, concatenate with the numeric
    features, normalize, route through the heterogeneous MoE layer, let the
    expert outputs interact via ExpertCollaboration (blended in through a
    learnable scalar ``alpha``), gate-mix them, then integrate and emit one
    CTR logit per sample.
    """

    def __init__(self, int_dims, cat_dims, embed_dim=8, num_experts=2, hidden_size=128, gate_hidden=64):
        """
        Args:
            int_dims: number of numeric (integer) input features.
            cat_dims: vocabulary size for each categorical field.
            embed_dim: embedding width shared by all categorical fields.
            num_experts: number of experts in the MoE layer.
            hidden_size: expert output width.
            gate_hidden: hidden width of the MoE gating network.
        """
        super().__init__()
        self.int_dims = int_dims
        self.cat_dims = cat_dims
        self.embed_dim = embed_dim

        # One embedding table per categorical field; index 0 reserved as padding.
        self.embeddings = nn.ModuleList([
            nn.Embedding(dim, embed_dim, padding_idx=0) for dim in cat_dims
        ])

        # Total feature width after concatenating numerics with all embeddings.
        self.total_embed_dim = int_dims + len(cat_dims) * embed_dim

        # Input normalization and dropout.
        self.input_norm = nn.BatchNorm1d(self.total_embed_dim)
        self.input_dropout = nn.Dropout(0.1)

        # Heterogeneous mixture-of-experts layer.
        self.moe_layer = HeterogeneousMoE(
            input_size=self.total_embed_dim,
            output_size=hidden_size,
            num_experts=num_experts,
            gate_hidden=gate_hidden
        )

        # Expert-collaboration layer: lets expert outputs attend to each other.
        self.expert_collaboration = ExpertCollaboration(
            expert_dim=hidden_size,
            num_experts=num_experts,
            num_heads=4
        )

        # Learnable blend strength for the collaboration branch; starting at 0
        # makes the model initially identical to the plain MoE.
        self.collaboration_alpha = nn.Parameter(torch.zeros(1))

        # Post-mixture feature integration.
        self.feature_integration = nn.Sequential(
            nn.BatchNorm1d(hidden_size),
            nn.Dropout(0.2),
            nn.Linear(hidden_size, hidden_size // 2),
            nn.ReLU(),
            nn.BatchNorm1d(hidden_size // 2),
            nn.Dropout(0.1)
        )

        # Final CTR logit head.
        self.predictor = nn.Linear(hidden_size // 2, 1)

        # Most recent gate weights, kept for inspection/logging.
        # NOTE(review): storing the gate tensor on the module keeps its
        # autograd graph alive until the next forward pass.
        self.gate_weights = {}

        # Weight of the expert-load-balancing auxiliary loss.
        self.balance_loss_weight = 0.001

        # Apply the stability-oriented initialization below.
        self._init_weights()

    def _init_weights(self):
        """Initialize weights for stability: small-gain Xavier for Linear
        layers, identity-like init for the normalization layers."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight, gain=0.5)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.LayerNorm):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, int_features, cat_features):
        """Compute CTR logits.

        Args:
            int_features: numeric features; assumed [batch, int_dims] float
                — TODO confirm with caller.
            cat_features: categorical indices; assumed
                [batch, len(cat_dims)] integer — TODO confirm with caller.

        Returns:
            dict with 'prediction' ([batch] logits), 'gate_weights', and
            'embedding' (integrated hidden features), plus 'balance_loss'
            when balance_loss_weight > 0.
        """
        # Sanitize non-finite numeric inputs.
        int_features = torch.nan_to_num(int_features, nan=0.0, posinf=1.0, neginf=-1.0)

        # Look up one embedding per categorical column.
        cat_embeds = []
        for i, embedding in enumerate(self.embeddings):
            cat_embeds.append(embedding(cat_features[:, i]))

        combined_features = torch.cat([int_features] + cat_embeds, dim=1)
        combined_features = self.input_norm(combined_features)
        combined_features = self.input_dropout(combined_features)

        # NOTE(review): the broad except below converts ANY error into a
        # zero-logit output — resilient, but it can hide real bugs.
        try:
            # Per-expert outputs [batch, num_experts, hidden] and gate weights.
            expert_outputs, gate = self.moe_layer(combined_features, return_all_outputs=True)
            self.gate_weights['moe_layer'] = gate
            expert_usage = gate.mean(0)

            # 1. Let the expert outputs interact via cross-attention.
            collaborated_outputs = self.expert_collaboration(expert_outputs)

            # 2. Blend original and collaborated outputs through alpha so the
            #    model learns the collaboration strength itself.
            final_expert_outputs = expert_outputs + self.collaboration_alpha * collaborated_outputs

            # 3. Gate-weighted sum over the expert axis.
            x = (final_expert_outputs * gate.unsqueeze(-1)).sum(dim=1)

            if torch.isnan(x).any() or torch.isinf(x).any():
                x = torch.nan_to_num(x, nan=0.0, posinf=1.0, neginf=-1.0)

            x = self.feature_integration(x)
            logits = self.predictor(x)

            if torch.isnan(logits).any() or torch.isinf(logits).any():
                logits = torch.zeros_like(logits)

            result = {
                'prediction': logits.squeeze(-1),
                'gate_weights': self.gate_weights,
                'embedding': x
            }

            # Expose alpha for monitoring. It is constant within a batch;
            # log it once per epoch from the trainer.
            self.gate_weights['alpha'] = self.collaboration_alpha.item()

            # Auxiliary load-balancing loss: negative entropy of the mean
            # gate usage, so minimizing it pushes usage toward uniform.
            if hasattr(self, 'balance_loss_weight') and self.balance_loss_weight > 0:
                uniformity = -(expert_usage * torch.log(expert_usage + 1e-10)).sum()
                result['balance_loss'] = -uniformity * self.balance_loss_weight

            return result

        except Exception as e:
            # Best-effort fallback: report the error and emit zero logits.
            print(f"前向传播中发生错误: {e}")
            logits = torch.zeros((int_features.size(0), 1), device=int_features.device)
            self.gate_weights = {'moe_layer': None}

            return {
                'prediction': logits.squeeze(-1),
                'gate_weights': self.gate_weights,
                'embedding': None
            }