import torch
from torch import nn
from torch.optim import AdamW
from tqdm import tqdm
from sklearn.metrics import accuracy_score, f1_score
import torch.nn.functional as F
# Imports for model quantization and pruning
from torch.quantization import quantize_dynamic
import torch.nn.utils.prune as prune

from src.bert.my_dataloader import my_dataLoader
from src.bert.my_model import BertClassifier
from utils.config import Config


# Student model definition (intended as a smaller BERT model)
class StudentBertClassifier(nn.Module):
    """Student classifier used as the distillation target.

    NOTE(review): despite the name, this reuses the teacher's encoder
    (``BertClassifier().bert``), so it is not actually smaller — confirm
    whether a reduced-size encoder was intended.

    ``forward`` returns ``(category_logits, label_logits, last_hidden_state)``
    so the training loop can distill both logits and hidden features.
    """

    def __init__(self, config):
        super(StudentBertClassifier, self).__init__()
        self.config = config
        # Encoder taken from the teacher architecture.
        self.bert = BertClassifier().bert
        # The heads operate on [CLS] concatenated with mean pooling,
        # hence twice the embedding dimension.
        combined_dim = config.embed_dim * 2
        self.fc_cat = nn.Linear(combined_dim, config.num_cat_classes)
        self.fc_label = nn.Linear(combined_dim, 1)

    def forward(self, input_ids, attention_mask):
        """Run the encoder and both task heads.

        Args:
            input_ids: token-id tensor, (batch, seq_len).
            attention_mask: 0/1 mask, (batch, seq_len); 1 marks real tokens.

        Returns:
            Tuple of (category logits (batch, num_cat_classes),
            binary-label logits (batch,), encoder last hidden state).
        """
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        hidden = outputs.last_hidden_state

        # [CLS] vector (first token position).
        cls = hidden[:, 0]

        # Masked mean pooling over real tokens only.
        # Cast the mask to float: tokenizer attention masks are typically
        # integer tensors, and torch.clamp(int_tensor, min=1e-9) raises a
        # dtype error (and would otherwise truncate the epsilon to 0).
        mask_expanded = attention_mask.unsqueeze(-1).expand(hidden.size()).float()
        sum_embeddings = torch.sum(hidden * mask_expanded, 1)
        sum_mask = torch.clamp(mask_expanded.sum(1), min=1e-9)
        mean_pool = sum_embeddings / sum_mask

        # Concatenate [CLS] and mean-pooled features.
        features = torch.cat([cls, mean_pool], dim=1)

        # Per-task outputs.
        out_cat = self.fc_cat(features)
        out_label = self.fc_label(features).squeeze(-1)

        return out_cat, out_label, hidden


def distillation_loss(student_logits, teacher_logits, student_hidden, teacher_hidden,
                      true_labels, alpha=0.7, temperature=3.0):
    """Combined knowledge-distillation loss for the category task.

    Blends three terms:
      * temperature-scaled KL divergence between student and teacher logits,
      * cross-entropy of the student logits against ground-truth labels,
      * MSE between student and teacher hidden states (fixed 0.1 weight).

    Args:
        student_logits: raw class scores from the student, (batch, classes).
        teacher_logits: raw class scores from the teacher, same shape.
        student_hidden: student encoder hidden states.
        teacher_hidden: teacher encoder hidden states, same shape as student's.
        true_labels: ground-truth class indices, (batch,).
        alpha: weight of the soft (teacher) term vs. the hard-label term.
        temperature: softmax temperature; the KL term is rescaled by T^2.

    Returns:
        Scalar tensor holding the weighted total loss.
    """
    # Soft-target term: KL(teacher || student) at temperature T, rescaled by
    # T^2 so gradient magnitudes stay comparable across temperatures.
    log_p_student = F.log_softmax(student_logits / temperature, dim=1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=1)
    soft_loss = F.kl_div(log_p_student, p_teacher, reduction='batchmean') * (temperature ** 2)

    # Hard-label supervision on the student's raw logits.
    hard_loss = F.cross_entropy(student_logits, true_labels)

    # Feature-level distillation on the hidden representations.
    feature_loss = F.mse_loss(student_hidden, teacher_hidden)

    return alpha * soft_loss + (1 - alpha) * hard_loss + 0.1 * feature_loss


def prune_model(model, pruning_ratio=0.3):
    """Apply L1 unstructured pruning to the classifier-head linears.

    Every ``nn.Linear`` whose qualified module name contains ``'fc'`` has
    ``pruning_ratio`` of its smallest-magnitude weights masked to zero.
    Pruning is applied in place; the same model object is returned.
    """
    fc_linears = [
        module
        for name, module in model.named_modules()
        if isinstance(module, nn.Linear) and 'fc' in name
    ]
    for linear in fc_linears:
        prune.l1_unstructured(linear, name='weight', amount=pruning_ratio)
    return model


def quantize_model(model):
    """Return a dynamically quantized copy of *model*.

    All ``nn.Linear`` layers are replaced by int8 dynamically quantized
    equivalents (weights stored as qint8; activations quantized on the fly).
    """
    target_layers = {nn.Linear}
    return quantize_dynamic(model, target_layers, dtype=torch.qint8)


def train_distilled_model():
    """Distill the pretrained teacher BERT classifier into a student model.

    Pipeline:
      1. Load the teacher (``BertClassifier``) from ``bt_model.pth``, frozen in eval mode.
      2. Train ``StudentBertClassifier`` for 3 epochs with a distillation loss on
         the category task plus BCE on the binary-label task, printing running
         metrics and saving ``bt_model_distilled.pth`` each epoch.
      3. Prune the student's fc heads and save ``bt_model_pruned.pth``.
      4. Dynamically quantize the pruned model and save ``bt_model_quantized.pth``.
    """
    config = Config('E:/Python+AI/group4_nlp_project')
    train_loader = my_dataLoader(config.train_path)

    # Teacher model: the pretrained full-size model, used in eval mode only.
    teacher_model = BertClassifier()
    teacher_model.load_state_dict(torch.load(config.save_model + 'bt_model.pth'))
    teacher_model.to(config.device)
    teacher_model.eval()

    # Student model to be trained.
    student_model = StudentBertClassifier(config)
    student_model.to(config.device)

    optimizer = AdamW(student_model.parameters(), lr=2e-5)
    # Loop-invariant: build the binary-label loss module once, not per batch.
    label_loss_fn = nn.BCEWithLogitsLoss()

    for epoch in range(3):  # distillation typically needs fewer epochs
        student_model.train()
        total_loss = 0

        all_cat_preds = []
        all_cat_labels = []
        all_label_preds = []
        all_label_labels = []

        for batch_idx, (input_ids, attention_mask, labels, cat_labels) in enumerate(tqdm(train_loader)):
            input_ids = input_ids.to(config.device)
            attention_mask = attention_mask.to(config.device)
            labels = labels.float().to(config.device)
            cat_labels = cat_labels.long().to(config.device)

            # Teacher outputs (no gradient tracking).
            # NOTE(review): the teacher encoder runs twice per batch — once
            # inside teacher_model(...) and once via teacher_model.bert —
            # because the teacher's forward does not expose hidden states.
            with torch.no_grad():
                teacher_cat_out, teacher_label_out = teacher_model(input_ids, attention_mask)
                teacher_outputs = teacher_model.bert(input_ids=input_ids, attention_mask=attention_mask)
                teacher_hidden_states = teacher_outputs.last_hidden_state

            # Student forward pass.
            student_cat_out, student_label_out, student_hidden_states = student_model(input_ids, attention_mask)

            # Distillation loss on the category task (soft + hard + feature terms).
            cat_distill_loss = distillation_loss(
                student_cat_out, teacher_cat_out,
                student_hidden_states, teacher_hidden_states,
                cat_labels
            )

            # Plain BCE loss on the binary-label task (not distilled).
            label_loss = label_loss_fn(student_label_out, labels)

            loss = cat_distill_loss + label_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            # Collect predictions for running metrics.
            _, predicted_cat = torch.max(student_cat_out.data, 1)
            # student_label_out is already 1-D (forward squeezes the last dim).
            # The previous extra .squeeze() collapsed a size-1 final batch to a
            # 0-d array, which made list.extend below raise.
            predicted_labels = (torch.sigmoid(student_label_out) > 0.5).float()

            all_cat_preds.extend(predicted_cat.cpu().numpy())
            all_cat_labels.extend(cat_labels.cpu().numpy())
            all_label_preds.extend(predicted_labels.cpu().numpy())
            all_label_labels.extend(labels.cpu().numpy())

            if batch_idx % 100 == 0:
                avg_loss = total_loss / (batch_idx + 1)
                cat_acc = accuracy_score(all_cat_labels, all_cat_preds)
                label_acc = accuracy_score(all_label_labels, all_label_preds)
                print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item():.4f}, '
                      f'Avg Loss: {avg_loss:.4f}, Cat Acc: {cat_acc:.4f}, Label Acc: {label_acc:.4f}')

        # Epoch-level metrics over all collected predictions.
        epoch_cat_acc = accuracy_score(all_cat_labels, all_cat_preds)
        epoch_label_acc = accuracy_score(all_label_labels, all_label_preds)
        epoch_cat_f1 = f1_score(all_cat_labels, all_cat_preds, average='weighted')
        epoch_label_f1 = f1_score(all_label_labels, all_label_preds, average='weighted')
        print(f'Epoch {epoch} finished. Cat Acc: {epoch_cat_acc:.4f}, Label Acc: {epoch_label_acc:.4f}, '
              f'Cat f1: {epoch_cat_f1:.4f}, Label f1: {epoch_label_f1:.4f}')

        # Save the distilled student after every epoch (overwrites the previous file).
        torch.save(student_model.state_dict(), config.save_model + 'bt_model_distilled.pth')

    # Prune the trained student.
    print("开始模型剪枝...")
    pruned_model = prune_model(student_model, pruning_ratio=0.3)
    torch.save(pruned_model.state_dict(), config.save_model + 'bt_model_pruned.pth')

    # Quantize.
    print("开始模型量化...")
    # Pruning reparametrizations must be removed (masks baked into the
    # weights) before quantization can run.
    for module in pruned_model.modules():
        if isinstance(module, nn.Linear):
            if hasattr(module, 'weight_orig'):
                prune.remove(module, 'weight')
            if hasattr(module, 'bias_orig') and module.bias is not None:
                prune.remove(module, 'bias')

    quantized_model = quantize_model(pruned_model)
    torch.save(quantized_model.state_dict(), config.save_model + 'bt_model_quantized.pth')
    print("模型优化完成！")


if __name__ == '__main__':
    # Script entry point: run distillation, then pruning and quantization.
    train_distilled_model()
