import torch
import torch.nn.functional as F
import numpy as np
from loguru import logger


class MembershipInferenceAttacker:
    """Membership-inference attack suite against a trained classifier.

    Implements the four threshold attacks from the paper's Sec. 3.3.2:
    A_corr (prediction correctness), A_conf (max confidence),
    A_ent (prediction entropy) and A_mentr (true-class "modified" entropy).
    Each ``attack_*`` method returns ``(attack_accuracy, attack_advantage)``
    where advantage = accuracy - 0.5 (the random-guess baseline).
    """

    def __init__(self, target_model, dt_loader, test_loader, device):
        """
        Args:
            target_model: model under attack (the MKD student); switched to
                eval mode here and assumed fixed for the attacker's lifetime.
            dt_loader: loader over member samples (D_T, seen during training).
            test_loader: loader over non-member samples (held-out test set).
            device: device the model runs on; batch inputs are moved there.
        """
        self.target_model = target_model
        self.dt_loader = dt_loader      # member data
        self.test_loader = test_loader  # non-member data
        self.device = device
        self.target_model.eval()
        # Forward passes are expensive and all four attacks need the same
        # per-loader outputs, so cache them (keyed by loader identity).
        # Every derived metric is order-invariant, so caching cannot change
        # results; it is valid only while the model's weights are fixed.
        self._output_cache = {}

    def get_model_outputs(self, data_loader):
        """Run the target model over ``data_loader`` without gradients.

        Returns a tuple of CPU tensors concatenated over all batches:
        ``(logits, max_softmax_confidence, prediction_entropy, labels)``.
        """
        logits_list, conf_list, ent_list, label_list = [], [], [], []
        with torch.no_grad():
            for x, y in data_loader:
                x = x.to(self.device)
                logits = self.target_model(x)
                # Derive probabilities from a single log_softmax instead of
                # computing softmax and log_softmax separately per batch.
                log_probs = F.log_softmax(logits, dim=1)
                probs = log_probs.exp()
                conf = probs.max(dim=1)[0]             # top-1 confidence
                ent = -(probs * log_probs).sum(dim=1)  # Shannon entropy
                logits_list.append(logits.cpu())
                conf_list.append(conf.cpu())
                ent_list.append(ent.cpu())
                label_list.append(y.cpu())
        return (
            torch.cat(logits_list),
            torch.cat(conf_list),
            torch.cat(ent_list),
            torch.cat(label_list),
        )

    def _outputs(self, data_loader):
        """Cached ``get_model_outputs`` (see cache note in ``__init__``)."""
        key = id(data_loader)
        if key not in self._output_cache:
            self._output_cache[key] = self.get_model_outputs(data_loader)
        return self._output_cache[key]

    @staticmethod
    def _attack_metrics(member_pred, nonmember_pred):
        """Attack accuracy and advantage from 0/1 membership predictions.

        ``member_pred`` holds predictions on true members (1 is correct);
        ``nonmember_pred`` holds predictions on true non-members (0 is
        correct).  Accuracy = (TP + TN) / total; advantage = accuracy - 0.5.
        """
        true_positive = (member_pred == 1).sum().item()
        true_negative = (nonmember_pred == 0).sum().item()
        total = len(member_pred) + len(nonmember_pred)
        attack_acc = (true_positive + true_negative) / total
        return attack_acc, attack_acc - 0.5  # advantage over random guessing

    def attack_A_corr(self):
        """A_corr: correctly classified -> member, misclassified -> non-member."""
        dt_logits, _, _, dt_labels = self._outputs(self.dt_loader)
        test_logits, _, _, test_labels = self._outputs(self.test_loader)
        # Boolean correctness is the membership prediction itself; the
        # original 0.5 threshold on a 0/1 float was an obfuscated bool.
        dt_member = (dt_logits.argmax(dim=1) == dt_labels).int()
        test_member = (test_logits.argmax(dim=1) == test_labels).int()
        return self._attack_metrics(dt_member, test_member)

    def attack_A_conf(self):
        """A_conf: confidence >= threshold -> member (threshold = pooled median)."""
        dt_conf = self._outputs(self.dt_loader)[1]
        test_conf = self._outputs(self.test_loader)[1]
        threshold = torch.cat([dt_conf, test_conf]).median().item()
        return self._attack_metrics(
            (dt_conf >= threshold).int(), (test_conf >= threshold).int()
        )

    def attack_A_ent(self):
        """A_ent: entropy <= threshold -> member (members look more certain)."""
        dt_ent = self._outputs(self.dt_loader)[2]
        test_ent = self._outputs(self.test_loader)[2]
        threshold = torch.cat([dt_ent, test_ent]).median().item()
        return self._attack_metrics(
            (dt_ent <= threshold).int(), (test_ent <= threshold).int()
        )

    @staticmethod
    def _modified_entropy(logits, labels):
        """Per-sample ``-p_y * log(p_y)`` for the true class ``y``.

        NOTE(review): Song & Mittal's Mentr additionally sums
        ``-p_i * log(1 - p_i)`` over the wrong classes; this keeps the
        original code's true-class-only variant — confirm against the paper
        if exact reproduction of Mentr is required.
        """
        probs = F.softmax(logits, dim=1)
        true_probs = probs[torch.arange(len(probs)), labels]
        # epsilon guards log(0) for samples with ~zero true-class mass
        return -true_probs * torch.log(true_probs + 1e-10)

    def attack_A_mentr(self):
        """A_mentr: modified entropy <= threshold -> member."""
        dt_logits, _, _, dt_labels = self._outputs(self.dt_loader)
        test_logits, _, _, test_labels = self._outputs(self.test_loader)
        dt_mentr = self._modified_entropy(dt_logits, dt_labels)
        test_mentr = self._modified_entropy(test_logits, test_labels)
        threshold = torch.cat([dt_mentr, test_mentr]).median().item()
        return self._attack_metrics(
            (dt_mentr <= threshold).int(), (test_mentr <= threshold).int()
        )

    def run_all_attacks(self):
        """Run all four attacks, log the results, and return a dict mapping
        attack name -> (attack_accuracy, attack_advantage)."""
        attacks = {
            "A_corr": self.attack_A_corr(),
            "A_conf": self.attack_A_conf(),
            "A_ent": self.attack_A_ent(),
            "A_mentr": self.attack_A_mentr(),
        }
        logger.info("===== 成员推理攻击评估结果 =====")
        for attack_name, (acc, advantage) in attacks.items():
            logger.info(f"{attack_name}：攻击准确率={acc:.4f}，优势率={advantage:.4f}")
        return attacks