#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
模型剪枝脚本
"""

import torch
import torch.nn.utils.prune as prune
from transformers import BertTokenizer, BertForSequenceClassification
import os

def get_model_size(model_path):
    """Return the total size in bytes of the regular files directly inside *model_path*.

    Subdirectories are not descended into; only top-level files count.
    """
    total_bytes = 0
    for entry in os.scandir(model_path):
        if entry.is_file():
            total_bytes += entry.stat().st_size
    return total_bytes

def prune_model():
    """Prune a fine-tuned BERT classifier and save the pruned checkpoint.

    Loads the model from ``./model_cache/bert_transaction_classifier``,
    applies 30% L1-unstructured magnitude pruning to every encoder layer's
    attention (query/key/value/output), feed-forward (intermediate/output)
    linear weights and to the final classifier weight, makes the pruning
    permanent, then saves model + tokenizer to a ``*_pruned`` directory and
    prints the on-disk sizes.

    NOTE: unstructured pruning only zeroes weights; the dense tensors keep
    their shape, so the saved file size shrinks little unless the checkpoint
    format compresses zeros. The size report is informational.
    """
    # Load the original fine-tuned model and tokenizer.
    model_path = './model_cache/bert_transaction_classifier'
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(model_path)

    # Report the original on-disk model size.
    original_size = get_model_size(model_path)
    print(f"原始模型大小: {original_size / (1024*1024):.2f} MB")

    # Collect every linear module we want to prune: the attention
    # query/key/value projections, the attention output projection, and the
    # feed-forward intermediate/output layers of each encoder block, plus
    # the final classification head.
    target_modules = []
    for layer in model.bert.encoder.layer:
        target_modules.extend([
            layer.attention.self.query,
            layer.attention.self.key,
            layer.attention.self.value,
            layer.attention.output.dense,
            layer.intermediate.dense,
            layer.output.dense,
        ])
    target_modules.append(model.classifier)

    # Zero out the 30% smallest-magnitude weights in each target module.
    for module in target_modules:
        prune.l1_unstructured(module, name='weight', amount=0.3)

    # BUG FIX: make the pruning permanent before saving. l1_unstructured
    # installs a reparametrization (`weight_orig` + `weight_mask` buffers);
    # without prune.remove() the saved state dict would contain BOTH the
    # unpruned weights and the masks (inflating the checkpoint), and
    # from_pretrained() on the saved files would not re-apply the masks,
    # silently discarding the pruning.
    for module in target_modules:
        prune.remove(module, 'weight')

    # Save the pruned model and its tokenizer.
    pruned_model_path = './model_cache/bert_transaction_classifier_pruned'
    model.save_pretrained(pruned_model_path)
    tokenizer.save_pretrained(pruned_model_path)

    # Report the pruned on-disk size and compression ratio.
    pruned_size = get_model_size(pruned_model_path)
    print(f"剪枝后模型大小: {pruned_size / (1024*1024):.2f} MB")
    print(f"剪枝压缩率: {(1 - pruned_size/original_size)*100:.2f}%")

if __name__ == "__main__":
    prune_model()