import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from train import evaluate_model

def student_loss(student_preds, target):
    """Cross-entropy between predicted class probabilities and one-hot targets.

    Args:
        student_preds: (batch, num_classes) probabilities (post-softmax).
        target: (batch, num_classes) one-hot (or soft) target distribution.

    Returns:
        Scalar tensor: cross-entropy averaged over the batch.
    """
    # Clamp away exact zeros before the log: log(0) = -inf would poison the
    # loss and its gradients whenever the model assigns zero probability
    # to the target class.
    log_probs = torch.log(student_preds.clamp_min(1e-12))
    loss = -torch.sum(target * log_probs) / target.size(0)
    return loss

def distillation_loss(teacher_preds, student_preds, temp):
    """Temperature-scaled KL divergence KL(teacher || student).

    Args:
        teacher_preds: (batch, num_classes) teacher logits.
        student_preds: (batch, num_classes) student logits.
        temp: softening temperature (> 0).

    Returns:
        Scalar tensor: KL divergence averaged over the batch.

    NOTE(review): the classic KD recipe (Hinton et al.) additionally scales
    this term by temp**2 to keep gradient magnitudes comparable across
    temperatures; left unchanged here since callers use temp == 1.
    """
    # Soften both distributions with the same temperature.
    teacher_probs = F.softmax(teacher_preds / temp, dim=1)
    student_log_probs = F.log_softmax(student_preds / temp, dim=1)

    # F.kl_div computes sum(target * (log(target) - input)) but treats
    # 0 * log(0) as 0, unlike the manual formulation
    # teacher_probs * torch.log(teacher_probs), which yields NaN via
    # 0 * (-inf) when a teacher probability underflows to exactly 0.
    # reduction='batchmean' divides by the batch size, matching the
    # original sum / teacher_preds.size(0).
    return F.kl_div(student_log_probs, teacher_probs, reduction="batchmean")


def knowledge_distillation(teacher_model, student_model, train_dataloader, test_dataloader):
    """Distill `teacher_model` into `student_model` on `train_dataloader`.

    Total loss per batch is a convex combination of
      * hard-label cross-entropy (student vs. ground-truth labels), and
      * soft-label KL divergence (student vs. temperature-softened teacher).

    Args:
        teacher_model: frozen model; must expose `.logits` outputs and `.device`.
        student_model: model to train in place; same interface as the teacher.
        train_dataloader: yields dicts with "input_ids", "attention_mask", "labels".
        test_dataloader: used for periodic accuracy evaluation via evaluate_model.

    Returns:
        The student model (updated in place).
    """
    print("beginning knowledge distillation")
    teacher_model.eval()
    student_model.train()

    temp = 1        # distillation temperature
    alpha = 0.5     # weight of the hard-label (cross-entropy) term
    optimizer = torch.optim.Adam(student_model.parameters(), lr=5e-5)

    epochs = 1
    step = 0  # renamed from `iter`, which shadowed the builtin
    for epoch in range(epochs):
        for batch in train_dataloader:

            step += 1
            input_ids = batch["input_ids"].to(student_model.device)
            attention_mask = batch["attention_mask"].to(student_model.device)
            labels = batch["labels"].to(student_model.device)

            # Teacher forward pass (frozen, no gradients).
            with torch.no_grad():
                teacher_logits = teacher_model(input_ids, attention_mask=attention_mask).logits
            # Student forward pass.
            student_logits = student_model(input_ids, attention_mask=attention_mask).logits
            student_probs = F.softmax(student_logits, dim=1)

            num_classes = student_probs.size(1)
            target = F.one_hot(labels, num_classes=num_classes).float()

            loss_student = student_loss(student_probs, target)
            # BUG FIX: pass raw logits, not softmaxed probabilities —
            # distillation_loss applies softmax/log_softmax internally, so
            # feeding probabilities softmaxed the distributions twice and
            # flattened them toward uniform.
            loss_distillation = distillation_loss(teacher_logits, student_logits, temp)
            loss = alpha * loss_student + (1 - alpha) * loss_distillation

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print("Epoch: {}, iter: {}, loss : {:.4f}".format(
                epoch, step, loss.item()
            ))

            # Periodic held-out evaluation.
            if step % 100 == 0:
                acc = evaluate_model(student_model, test_dataloader, student_model.device)
                print(f"Iteration {step}, Loss: {loss.item()}, Accuracy: {acc}")
    return student_model



def output_distribution(teacher_model, student_model, test_dataloader):
    """Plot histograms of teacher/student top-class probabilities and their gap.

    For every test sample, take the probability of the most confident class
    from each model, then draw three histograms side by side: teacher
    confidences, student confidences, and their per-sample absolute difference.

    Args:
        teacher_model: model exposing `.logits` outputs and `.device`.
        student_model: model with the same interface.
        test_dataloader: yields dicts with "input_ids" and "attention_mask".

    Returns:
        0 (plots are shown as a side effect via plt.show()).
    """
    print("Output distribution")
    teacher_model.eval()
    student_model.eval()

    all_teacher_preds = []
    all_student_preds = []
    all_differences = []

    for batch in tqdm(test_dataloader):
        input_ids = batch["input_ids"].to(student_model.device)
        attention_mask = batch["attention_mask"].to(student_model.device)
        with torch.no_grad():
            teacher_logits = teacher_model(input_ids, attention_mask=attention_mask).logits
            teacher_probs = F.softmax(teacher_logits, dim=1)
            student_logits = student_model(input_ids, attention_mask=attention_mask).logits
            student_probs = F.softmax(student_logits, dim=1)

        # BUG FIX: take the per-sample max over the softmax *probabilities*,
        # not the raw logits — the plots are labeled "Probability" and raw
        # logits are unbounded, so histograms of them are not comparable
        # between the two models.
        teacher_max_probs, _ = torch.max(teacher_probs, dim=1)
        student_max_probs, _ = torch.max(student_probs, dim=1)

        # Per-sample gap between the two models' top-class confidence.
        difference = torch.abs(teacher_max_probs - student_max_probs)
        all_teacher_preds.append(teacher_max_probs)
        all_student_preds.append(student_max_probs)
        all_differences.append(difference)

    # Concatenate results from all batches into flat numpy arrays.
    all_teacher_preds = torch.cat(all_teacher_preds, dim=0).cpu().numpy()
    all_student_preds = torch.cat(all_student_preds, dim=0).cpu().numpy()
    all_differences = torch.cat(all_differences, dim=0).cpu().numpy()

    # Draw the three histograms.
    plt.figure(figsize=(12, 6))

    # Teacher confidence distribution.
    plt.subplot(1, 3, 1)
    plt.hist(all_teacher_preds.flatten(), bins=50, alpha=0.7, color='blue', label='Teacher')
    plt.title("Teacher Model Prediction Distribution")
    plt.xlabel("Probability")
    plt.ylabel("Frequency")
    plt.legend()

    # Student confidence distribution.
    plt.subplot(1, 3, 2)
    plt.hist(all_student_preds.flatten(), bins=50, alpha=0.7, color='green', label='Student')
    plt.title("Student Model Prediction Distribution")
    plt.xlabel("Probability")
    plt.ylabel("Frequency")
    plt.legend()

    # Teacher-vs-student gap distribution.
    plt.subplot(1, 3, 3)
    plt.hist(all_differences.flatten(), bins=50, alpha=0.7, color='red', label='Difference')
    plt.title("Prediction Difference Distribution")
    plt.xlabel("Absolute Difference")
    plt.ylabel("Frequency")
    plt.legend()

    plt.tight_layout()
    plt.show()

    return 0