import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from transformers import AutoModel


class VisualExpert(nn.Module):
    """Image feature extractor: a CNN backbone followed by a linear projection.

    Supported backbones: ``"resnet50"`` and ``"efficientnet_b0"``. Any other
    string silently falls back to resnet50 (original behavior, preserved).
    The backbone's classification head is replaced with ``nn.Identity`` so the
    module emits pooled features, which are then projected to ``out_dim``.
    """

    def __init__(self, backbone: str = "resnet50", out_dim: int = 512, pretrained: bool = False):
        super().__init__()
        if backbone == "efficientnet_b0":
            weights = models.EfficientNet_B0_Weights.IMAGENET1K_V1 if pretrained else None
            m = models.efficientnet_b0(weights=weights)
            feat_dim = m.classifier[1].in_features
            m.classifier = nn.Identity()
        else:
            # resnet50 is both the named option and the fallback for unknown
            # backbone strings; the two original branches were identical.
            weights = models.ResNet50_Weights.IMAGENET1K_V2 if pretrained else None
            m = models.resnet50(weights=weights)
            feat_dim = m.fc.in_features  # 2048 for resnet50; read it instead of hard-coding
            m.fc = nn.Identity()
        self.backbone = m
        self.proj = nn.Linear(feat_dim, out_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map an image batch to (B, out_dim) features."""
        feat = self.backbone(x)
        return self.proj(feat)


class TextExpert(nn.Module):
    """Wraps a pretrained HuggingFace text encoder behind two projection heads.

    Produces a pooled sentence embedding (taken from the first token's hidden
    state) and per-token embeddings, each projected to ``out_dim``.
    """

    def __init__(self, model_name: str = "distilbert-base-uncased", out_dim: int = 512, train_encoder: bool = False):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(model_name)
        # Optionally freeze the encoder so only the projection heads train.
        if not train_encoder:
            for param in self.encoder.parameters():
                param.requires_grad = False
        hidden_size = self.encoder.config.hidden_size
        self.token_proj = nn.Linear(hidden_size, out_dim)
        self.pool_proj = nn.Linear(hidden_size, out_dim)

    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor):
        """Return ``(pooled_projection, token_projections)``.

        ``pooled_projection`` is (B, out_dim); ``token_projections`` is
        (B, T, out_dim).
        """
        outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=False,
            return_dict=True,
        )
        hidden_states = outputs.last_hidden_state
        # First-token ([CLS]-style) pooling of the sequence.
        cls_state = hidden_states[:, 0]
        return self.pool_proj(cls_state), self.token_proj(hidden_states)


class CrossModalExpert(nn.Module):
    """Fuses a visual feature with text tokens via single-query cross-attention.

    The visual feature acts as one attention query over the text token
    sequence; the attended result is passed through a small feed-forward head.
    """

    def __init__(self, dim: int = 512, num_heads: int = 8, out_dim: int = 512):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)
        self.ff = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(), nn.Linear(dim, out_dim))

    def forward(self, visual_feat: torch.Tensor, text_tokens: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        """Cross-attend ``visual_feat`` over ``text_tokens``.

        Args:
            visual_feat: (B, dim) visual features, used as the query.
            text_tokens: (B, T, dim) text token features (keys and values).
            attention_mask: (B, T); nonzero marks valid tokens, zero marks padding.

        Returns:
            (B, out_dim) fused features.
        """
        # One query per sample: (B, 1, dim). (The original also computed an
        # unused batch-size local here; removed.)
        q = visual_feat.unsqueeze(1)
        # MultiheadAttention expects True at positions to IGNORE.
        key_padding_mask = attention_mask == 0
        attended, _ = self.attn(q, text_tokens, text_tokens, key_padding_mask=key_padding_mask)
        return self.ff(attended.squeeze(1))


class MultiModalMoEClassifier(nn.Module):
    """Mixture-of-experts classifier over three experts: visual, text, cross-modal.

    A softmax gate over the concatenated (visual, pooled-text) features scores
    the experts; the top-k expert features per sample are weight-averaged and
    classified. ``forward`` returns ``(logits, gate_probs, load_balance_loss)``.
    """

    def __init__(
        self,
        num_classes: int,
        img_backbone: str = "resnet50",
        text_model: str = "distilbert-base-uncased",
        feature_dim: int = 512,
        num_experts: int = 3,
        top_k: int = 2,
        train_text_encoder: bool = False,
        noise_scale: float = 0.1,
        visual_pretrained: bool = False,
        use_visual_aux: bool = False,
        visual_aux_weight: float = 0.4,
        gating_strategy: str = "softmax_topk",
        gating_temperature: float = 1.0,
        gating_power_alpha: float = 1.0,
        second_prob_threshold: float = 0.0,
    ):
        super().__init__()
        self.num_experts = num_experts
        self.top_k = max(1, min(top_k, num_experts))  # clamp to [1, num_experts]
        self.noise_scale = noise_scale
        self.gating_strategy = gating_strategy
        self.gating_temperature = float(gating_temperature)
        self.gating_power_alpha = float(gating_power_alpha)
        self.second_prob_threshold = float(second_prob_threshold)
        self.use_visual_aux = bool(use_visual_aux)
        self.visual_aux_weight = float(visual_aux_weight)
        # Strength of the "visual expert is mandatory" warm-up boost. The
        # original code never initialized this and probed it with hasattr();
        # initializing it here keeps the forward-pass checks simple while
        # remaining compatible with external code that sets/decays it.
        self.visual_mandatory_strength = 0.0

        self.visual_expert = VisualExpert(backbone=img_backbone, out_dim=feature_dim, pretrained=visual_pretrained)
        self.text_expert = TextExpert(model_name=text_model, out_dim=feature_dim, train_encoder=train_text_encoder)
        self.cross_modal_expert = CrossModalExpert(dim=feature_dim, num_heads=8, out_dim=feature_dim)

        # Gate scores experts from the concatenated (visual, pooled-text) features.
        self.gate = nn.Sequential(
            nn.Linear(feature_dim * 2, num_experts),
            nn.Softmax(dim=-1),
        )

        self.classifier = nn.Linear(feature_dim, num_classes)
        if self.use_visual_aux:
            self.aux_classifier_visual = nn.Linear(feature_dim, num_classes)

    def _compute_topk_weights(self, topk_vals: torch.Tensor) -> torch.Tensor:
        """Convert selected gate values of shape (B, top_k) into mixing weights.

        Strategies:
          - ``"prob_norm"``: renormalize the raw gate probabilities;
          - ``"softmax_temp"``: temperature-scaled softmax over the selection;
          - ``"power"``: normalize ``topk_vals ** alpha``;
          - default (``"softmax_topk"``): plain softmax over the selection.
        When ``top_k >= 2`` and the second expert's gate value falls below
        ``second_prob_threshold``, the sample routes entirely to its top expert.
        """
        if self.gating_strategy == "prob_norm":
            s = topk_vals.sum(dim=-1, keepdim=True)
            # Fall back to softmax if the selected mass is degenerately zero.
            weights = torch.where(s > 0, topk_vals / s, F.softmax(topk_vals, dim=-1))
        elif self.gating_strategy == "softmax_temp":
            weights = F.softmax(topk_vals / max(self.gating_temperature, 1e-6), dim=-1)
        elif self.gating_strategy == "power":
            powed = torch.pow(topk_vals.clamp(min=0), self.gating_power_alpha)
            s = powed.sum(dim=-1, keepdim=True)
            weights = torch.where(s > 0, powed / s, F.softmax(topk_vals, dim=-1))
        else:
            weights = F.softmax(topk_vals, dim=-1)
        if self.top_k >= 2 and self.second_prob_threshold > 0:
            weak_second = topk_vals[:, 1] < self.second_prob_threshold
            if weak_second.any():
                # Out-of-place override (the original mutated ``weights`` in
                # place, which can break autograd on a softmax output): such
                # samples use only their top expert.
                top_only = torch.zeros_like(weights)
                top_only[:, 0] = 1.0
                weights = torch.where(weak_second.unsqueeze(-1), top_only, weights)
        return weights

    def forward(self, images: torch.Tensor, input_ids: torch.Tensor, attention_mask: torch.Tensor):
        """Run all experts, gate, fuse, and classify.

        Returns:
            logits: (B, num_classes) classification logits.
            gate_probs: (B, num_experts) gate probabilities (post-noise/boost).
            lb_loss: scalar load-balancing loss over the batch.
        """
        v_feat = self.visual_expert(images)
        t_pool, t_tokens = self.text_expert(input_ids=input_ids, attention_mask=attention_mask)
        cm_feat = self.cross_modal_expert(v_feat, t_tokens, attention_mask)

        # Expert outputs stacked as (B, num_experts, feature_dim); index 0 is visual.
        feats = torch.stack([v_feat, t_pool, cm_feat], dim=1)

        gate_inp = torch.cat([v_feat, t_pool], dim=1)
        gate_probs = self.gate(gate_inp)
        if self.training and self.noise_scale > 0:
            # Exploration noise on the gate during training, then re-normalize.
            gate_probs = gate_probs + torch.randn_like(gate_probs) * self.noise_scale
            gate_probs = torch.softmax(gate_probs, dim=-1)
        # Optional early-training policy: boost the visual expert's probability
        # (strength is set and decayed externally; 0 disables it).
        mandatory = float(self.visual_mandatory_strength or 0.0)
        if self.training and mandatory > 0:
            B = gate_probs.size(0)
            visual_mask = F.one_hot(torch.zeros(B, dtype=torch.long, device=gate_probs.device), num_classes=self.num_experts).float()
            gate_probs = gate_probs + mandatory * visual_mask
            gate_probs = gate_probs / gate_probs.sum(dim=-1, keepdim=True)

        topk_vals, topk_idx = torch.topk(gate_probs, k=self.top_k, dim=-1)
        # Build the final selection separately instead of mutating topk's outputs.
        idx_final = topk_idx.detach().clone()
        if self.training and mandatory > 0 and self.top_k >= 1:
            has_visual = (idx_final == 0).any(dim=-1)
            missing_visual = ~has_visual
            if missing_visual.any():
                # For samples that didn't pick the visual expert, replace their
                # weakest selected expert (lowest gate value) with it.
                min_pos = topk_vals.argmin(dim=-1)
                rows = torch.nonzero(missing_visual, as_tuple=False).squeeze(-1)
                if rows.numel() > 0:
                    idx_final[rows, min_pos[rows]] = 0
        # Re-gather gate values for the (possibly adjusted) selection and
        # derive the mixing weights from them.
        val_final = torch.gather(gate_probs, 1, idx_final)
        topk_weights = self._compute_topk_weights(val_final)

        B = images.shape[0]
        idx_expanded = idx_final.unsqueeze(-1).expand(B, self.top_k, feats.size(-1))
        selected_feats = torch.gather(feats, 1, idx_expanded)
        fused = (selected_feats * topk_weights.unsqueeze(-1)).sum(dim=1)

        logits = self.classifier(fused)
        lb_loss = self.load_balance_loss(gate_probs)
        return logits, gate_probs, lb_loss

    def load_balance_loss(self, gate_probs: torch.Tensor) -> torch.Tensor:
        """MSE between mean expert usage and the uniform distribution."""
        target = torch.full((self.num_experts,), 1.0 / self.num_experts, device=gate_probs.device)
        mean_usage = gate_probs.mean(dim=0)
        return F.mse_loss(mean_usage, target)

    def forward_visual_aux(self, images: torch.Tensor) -> torch.Tensor:
        """Logits from the auxiliary visual-only classifier head."""
        if not self.use_visual_aux:
            raise RuntimeError("visual auxiliary classifier is disabled")
        v_feat = self.visual_expert(images)
        return self.aux_classifier_visual(v_feat)

    def compute_visual_aux_loss(self, images: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        """Weighted cross-entropy of the auxiliary head; zero when disabled."""
        if not self.use_visual_aux:
            return torch.zeros((), device=labels.device)
        logits_aux = self.forward_visual_aux(images)
        return F.cross_entropy(logits_aux, labels) * self.visual_aux_weight