import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
from loguru import logger
from data_loader import get_data_loaders
import sys
import os
# Prefer a Chinese UTF-8 locale so Chinese file paths / log text are handled
# consistently. locale.setlocale raises locale.Error when the requested locale
# is not installed (common on CI images and non-Chinese systems), which would
# abort the whole import — fall back to the process default instead.
import locale

try:
    locale.setlocale(locale.LC_ALL, 'zh_CN.UTF-8')
except locale.Error:
    # Locale not available — keep the interpreter's default locale.
    pass

# 1. Base model definitions (the paper uses AlexNet/ResNet18; ResNet18-style here, adapted for image and tabular data)
class ResNet18ForTabular(nn.Module):
    """ResNet18 stand-in for tabular data: a fully connected head with no conv layers.

    Architecture: input -> [Linear 256 -> BN -> ReLU -> Dropout]
                        -> [Linear 512 -> BN -> ReLU -> Dropout]
                        -> Linear num_classes (raw logits).
    """

    def __init__(self, input_dim, num_classes=100):
        super().__init__()
        # Attribute names deliberately match the original layout so that
        # previously saved state_dicts load without key remapping.
        self.fc1 = nn.Linear(input_dim, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.fc2 = nn.Linear(256, 512)
        self.bn2 = nn.BatchNorm1d(512)
        self.fc3 = nn.Linear(512, num_classes)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        # Two identical hidden stages: linear -> batch-norm -> ReLU -> dropout.
        for linear, norm in ((self.fc1, self.bn1), (self.fc2, self.bn2)):
            x = self.dropout(self.relu(norm(linear(x))))
        # Classification layer outputs unnormalized logits.
        return self.fc3(x)


# 2. Core MKD (mutual knowledge distillation) training class
class MKD_Trainer:
    """Mutual knowledge distillation trainer: a teacher and a student model
    alternately distill soft labels into each other across epochs.

    Data split convention (from get_data_loaders): dt_loader is the teacher's
    training set D_T, ds_loader the student's training set D_S, test_loader a
    held-out (non-member) evaluation set.
    """

    def __init__(self, dataset_name, model_type="resnet18", num_classes=100, lr=1e-3, epochs=50, batch_size=512):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.dataset_name = dataset_name
        self.model_type = model_type
        self.num_classes = num_classes
        self.lr = lr
        self.epochs = epochs
        self.batch_size = batch_size

        # Load the three data splits (teacher train / student train / test).
        self.dt_loader, self.ds_loader, self.test_loader = get_data_loaders(dataset_name, batch_size)

        # Instantiate teacher/student models; architecture depends on data modality.
        if dataset_name == "cifar100":
            # Image data: standard torchvision ResNet18.
            # NOTE(review): `pretrained=` is deprecated in recent torchvision in
            # favor of `weights=None` — confirm the installed version accepts it.
            from torchvision.models import resnet18
            self.teacher_model = resnet18(pretrained=False, num_classes=num_classes).to(self.device)
            self.student_model = resnet18(pretrained=False, num_classes=num_classes).to(self.device)
        else:
            # Tabular data: custom fully connected "ResNet18" defined above.
            # Peeks at one batch to infer the feature dimension (assumes batch
            # tensors are shaped (batch, features) — TODO confirm in data_loader).
            input_dim = next(iter(self.dt_loader))[0].shape[1]
            self.teacher_model = ResNet18ForTabular(input_dim, num_classes).to(self.device)
            self.student_model = ResNet18ForTabular(input_dim, num_classes).to(self.device)

        # Separate optimizers per model; shared hard-label loss.
        self.teacher_optimizer = optim.Adam(self.teacher_model.parameters(), lr=lr, weight_decay=1e-5)
        self.student_optimizer = optim.Adam(self.student_model.parameters(), lr=lr, weight_decay=1e-5)
        self.cross_entropy_loss = nn.CrossEntropyLoss()

    def get_soft_label(self, model, data_loader):
        """Run `model` over `data_loader` and return the concatenated softmax
        probability distributions (soft labels), one row per sample.

        Model is switched to eval mode; no gradients are tracked.
        """
        model.eval()
        soft_labels = []
        with torch.no_grad():
            for x, _ in data_loader:
                x = x.to(self.device)
                logits = model(x)
                soft_label = F.softmax(logits, dim=1)  # probability distribution over classes
                soft_labels.append(soft_label)
        return torch.cat(soft_labels, dim=0)

    def train_student(self, teacher_soft_labels, ds_loader):
        """One epoch of student training on D_S, distilling from the teacher.

        Loss: beta * KL(student || teacher soft labels) + (1-beta) * CE(hard labels).
        Returns (average loss per sample, accuracy on D_S).

        NOTE(review): batch i's soft labels are recovered by slicing
        teacher_soft_labels at i*batch_size — this is only aligned if ds_loader
        iterates in the same fixed order as in get_soft_label (i.e. shuffle=False
        and identical batching). Verify in get_data_loaders.
        """
        self.student_model.train()
        total_loss = 0.0
        total_acc = 0.0
        beta = 0.5  # distillation weight β from the paper; tunable
        ds_iter = iter(ds_loader)

        for i, (x, y) in enumerate(ds_iter):
            x, y = x.to(self.device), y.to(self.device)
            # Slice out this batch's precomputed teacher soft labels.
            batch_soft_labels = teacher_soft_labels[i * self.batch_size: (i + 1) * self.batch_size].to(self.device)

            # Student forward pass.
            student_logits = self.student_model(x)
            student_pred = torch.argmax(student_logits, dim=1)

            # L = β*L_tkd (KL vs. teacher soft labels) + (1-β)*L_sh (CE vs. hard labels).
            # kl_div expects log-probabilities as input and probabilities as target.
            L_tkd = F.kl_div(F.log_softmax(student_logits, dim=1), batch_soft_labels, reduction="batchmean")
            L_sh = self.cross_entropy_loss(student_logits, y)
            loss = beta * L_tkd + (1 - beta) * L_sh

            # Backpropagation and parameter update.
            self.student_optimizer.zero_grad()
            loss.backward()
            self.student_optimizer.step()

            # Accumulate sample-weighted loss and correct-prediction count.
            total_loss += loss.item() * x.size(0)
            total_acc += (student_pred == y).sum().item()

        avg_loss = total_loss / len(ds_loader.dataset)
        avg_acc = total_acc / len(ds_loader.dataset)
        return avg_loss, avg_acc

    def train_teacher(self, student_soft_labels, dt_loader):
        """One epoch of teacher training on D_T, distilling from the student
        (symmetric counterpart of train_student; same loss structure and the
        same batch-order alignment assumption on dt_loader).
        """
        self.teacher_model.train()
        total_loss = 0.0
        total_acc = 0.0
        beta = 0.5
        dt_iter = iter(dt_loader)

        for i, (x, y) in enumerate(dt_iter):
            x, y = x.to(self.device), y.to(self.device)
            # Slice out this batch's precomputed student soft labels.
            batch_soft_labels = student_soft_labels[i * self.batch_size: (i + 1) * self.batch_size].to(self.device)

            # Teacher forward pass.
            teacher_logits = self.teacher_model(x)
            teacher_pred = torch.argmax(teacher_logits, dim=1)

            # Symmetric loss: β*KL(teacher || student soft labels) + (1-β)*CE.
            L_skd = F.kl_div(F.log_softmax(teacher_logits, dim=1), batch_soft_labels, reduction="batchmean")
            L_th = self.cross_entropy_loss(teacher_logits, y)
            loss = beta * L_skd + (1 - beta) * L_th

            # Backpropagation and parameter update.
            self.teacher_optimizer.zero_grad()
            loss.backward()
            self.teacher_optimizer.step()

            # Accumulate statistics.
            total_loss += loss.item() * x.size(0)
            total_acc += (teacher_pred == y).sum().item()

        avg_loss = total_loss / len(dt_loader.dataset)
        avg_acc = total_acc / len(dt_loader.dataset)
        return avg_loss, avg_acc

    def evaluate_model(self, model, data_loader):
        """Return top-1 accuracy of `model` over `data_loader` (eval mode, no grad)."""
        model.eval()
        total_acc = 0.0
        with torch.no_grad():
            for x, y in data_loader:
                x, y = x.to(self.device), y.to(self.device)
                logits = model(x)
                pred = torch.argmax(logits, dim=1)
                total_acc += (pred == y).sum().item()
        avg_acc = total_acc / len(data_loader.dataset)
        return avg_acc

    def run_mkd(self):
        """Run the full MKD interactive distillation loop.

        Per epoch: (1) on epoch 1 only, pretrain the teacher on D_T hard labels;
        (2) teacher soft-labels D_S -> train student; (3) student soft-labels
        D_T -> train teacher; (4) evaluate both on the test set; (5) checkpoint
        every 10 epochs, plus a final save.

        NOTE(review): logger.add registers a new sink each time run_mkd is
        called — fine for one run per process, duplicates log lines otherwise.
        Checkpoint paths are hard-coded absolute Windows paths and the
        directories are assumed to exist.
        """
        logger.add(f"./results/logs/mkd_{self.dataset_name}_{self.model_type}.log")
        logger.info(f"MKD训练开始：数据集={self.dataset_name}，模型={self.model_type}，设备={self.device}")

        for epoch in range(1, self.epochs + 1):
            logger.info(f"===== 第{epoch}/{self.epochs}轮 =====")

            # Step 1: teacher pretraining on D_T (first epoch only); later
            # epochs update the teacher from student soft labels in step 3.
            if epoch == 1:
                # Initial epoch: plain supervised training with hard labels.
                self.teacher_model.train()
                teacher_loss, teacher_acc = 0.0, 0.0
                for x, y in self.dt_loader:
                    x, y = x.to(self.device), y.to(self.device)
                    logits = self.teacher_model(x)
                    loss = self.cross_entropy_loss(logits, y)
                    self.teacher_optimizer.zero_grad()
                    loss.backward()
                    self.teacher_optimizer.step()
                    teacher_loss += loss.item() * x.size(0)
                    teacher_acc += (torch.argmax(logits, dim=1) == y).sum().item()
                teacher_avg_loss = teacher_loss / len(self.dt_loader.dataset)
                teacher_avg_acc = teacher_acc / len(self.dt_loader.dataset)
                logger.info(f"初始教师训练：损失={teacher_avg_loss:.4f}，准确率={teacher_avg_acc:.4f}")

            # Step 2: teacher produces soft labels for D_S; train the student.
            teacher_soft_labels = self.get_soft_label(self.teacher_model, self.ds_loader)
            student_loss, student_acc = self.train_student(teacher_soft_labels, self.ds_loader)
            logger.info(f"学生训练：损失={student_loss:.4f}，准确率={student_acc:.4f}")

            # Step 3: student produces soft labels for D_T; train the teacher.
            student_soft_labels = self.get_soft_label(self.student_model, self.dt_loader)
            teacher_loss, teacher_acc = self.train_teacher(student_soft_labels, self.dt_loader)
            logger.info(f"教师训练：损失={teacher_loss:.4f}，准确率={teacher_acc:.4f}")

            # Step 4: evaluate both models on the held-out (non-member) test set.
            teacher_test_acc = self.evaluate_model(self.teacher_model, self.test_loader)
            student_test_acc = self.evaluate_model(self.student_model, self.test_loader)
            logger.info(f"测试集准确率：教师={teacher_test_acc:.4f}，学生={student_test_acc:.4f}")

            # Step 5: checkpoint weights every 10 epochs.
            if epoch % 10 == 0:
                torch.save(self.teacher_model.state_dict(),
                           f"E:/xt/雷达/培训/code/checkpoints/teacher_{self.dataset_name}_{self.model_type}_epoch{epoch}.pth")
                torch.save(self.student_model.state_dict(),
                           f"E:/xt/雷达/培训/code/checkpoints/student_{self.dataset_name}_{self.model_type}_epoch{epoch}.pth")

        # Training finished: save the final models.
        torch.save(self.teacher_model.state_dict(),
                   f"E:/xt/雷达/培训/code/checkpoints/teacher_{self.dataset_name}_{self.model_type}_final.pth")
        torch.save(self.student_model.state_dict(),
                   f"E:/xt/雷达/培训/code/checkpoints/student_{self.dataset_name}_{self.model_type}_final.pth")
        logger.info("MKD训练完成，模型已保存至./checkpoints/")