#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
模型压缩脚本
"""

import torch
from transformers import BertTokenizer, BertForSequenceClassification
import os

def get_model_size(model_path):
    """Return the total size in bytes of the regular files directly inside *model_path*.

    Subdirectories are not descended into, matching how a flat
    ``save_pretrained`` output directory is laid out.
    """
    total = 0
    with os.scandir(model_path) as entries:
        for entry in entries:
            if entry.is_file():
                total += entry.stat().st_size
    return total

def compress_model(model_path='./model_cache/bert_transaction_classifier'):
    """Dynamically quantize a fine-tuned BERT sequence classifier to int8.

    Loads the model and tokenizer from ``model_path``, applies post-training
    dynamic quantization to all ``torch.nn.Linear`` layers, saves the result
    next to the original (``<model_path>_quantized``) and prints the achieved
    size reduction. If quantization or saving fails for any reason, the
    original (unquantized) model is saved as a backup to
    ``<model_path>_backup`` instead.

    Args:
        model_path: Directory containing a ``save_pretrained`` BERT model.
            Defaults to the original hard-coded cache location, so existing
            callers are unaffected.
    """
    # Load the original model and tokenizer from the local cache directory.
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(model_path)

    # Report the on-disk size of the original model directory.
    original_size = get_model_size(model_path)
    print(f"原始模型大小: {original_size / (1024*1024):.2f} MB")

    # Dynamic quantization assumes inference mode.
    model.eval()
    try:
        # Post-training dynamic quantization: weights of Linear layers are
        # stored as int8 and dequantized on the fly at inference time.
        quantized_model = torch.quantization.quantize_dynamic(
            model, {torch.nn.Linear}, dtype=torch.qint8
        )

        # Persist the quantized model (plus tokenizer so the directory is
        # self-contained) alongside the original.
        quantized_model_path = model_path + '_quantized'
        quantized_model.save_pretrained(quantized_model_path)
        tokenizer.save_pretrained(quantized_model_path)

        # Report quantized size; guard the ratio against an (unexpectedly)
        # empty source directory so a ZeroDivisionError here is not
        # misreported as a quantization failure by the except below.
        quantized_size = get_model_size(quantized_model_path)
        print(f"量化后模型大小: {quantized_size / (1024*1024):.2f} MB")
        if original_size:
            print(f"量化压缩率: {(1 - quantized_size/original_size)*100:.2f}%")
    except Exception as e:
        # Broad catch is deliberate: this is a best-effort CLI step and any
        # failure should fall back to backing up the original model.
        print(f"动态量化失败: {e}")

        # Save the original model as a backup (once).
        backup_model_path = model_path + '_backup'
        if not os.path.exists(backup_model_path):
            model.save_pretrained(backup_model_path)
            tokenizer.save_pretrained(backup_model_path)
            print(f"原始模型已备份到 {backup_model_path}")
        else:
            print("备份已存在")

# Script entry point: run the compression pipeline when executed directly.
if __name__ == "__main__":
    compress_model()