#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
将BERT模型转换为ONNX格式
"""

import torch
from transformers import BertTokenizer, BertForSequenceClassification
import os

def get_model_size(model_path):
    """Return the total size in bytes of the regular files directly inside
    *model_path* (subdirectories are not recursed into)."""
    total_bytes = 0
    for entry in os.listdir(model_path):
        full_path = os.path.join(model_path, entry)
        if os.path.isfile(full_path):
            total_bytes += os.path.getsize(full_path)
    return total_bytes

def convert_to_onnx():
    """Export the fine-tuned BERT transaction classifier to ONNX and verify it.

    Loads the tokenizer and model from ./model_cache/bert_transaction_classifier,
    exports the model with dynamic batch/sequence axes to a .onnx file, prints
    the size comparison, then runs a single inference through onnxruntime and
    prints the predicted label as a sanity check.

    Returns None. Prints a message and returns early if the model directory
    does not exist.
    """
    # Bail out early if the fine-tuned model has not been created yet.
    model_path = './model_cache/bert_transaction_classifier'
    if not os.path.exists(model_path):
        print("模型不存在，请运行 'python create_model.py' 训练模型。")
        return

    # Load tokenizer and model from the local cache.
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(model_path)
    # Explicitly switch to inference mode (disables dropout etc.) before
    # tracing — the documented practice for torch.onnx.export.
    model.eval()

    # Report the size of the original (PyTorch) model files.
    original_size = get_model_size(model_path)
    print(f"原始模型大小: {original_size / (1024*1024):.2f} MB")

    # Build a representative dummy input used to trace the graph.
    dummy_input = "这是一个测试输入"
    inputs = tokenizer(dummy_input, return_tensors="pt", padding=True, truncation=True, max_length=512)

    # Export to ONNX format.
    onnx_model_path = './model_cache/bert_transaction_classifier.onnx'
    torch.onnx.export(
        model,
        (inputs['input_ids'], inputs['attention_mask']),
        onnx_model_path,
        export_params=True,
        opset_version=11,
        do_constant_folding=True,
        input_names=['input_ids', 'attention_mask'],
        output_names=['logits'],
        # Dynamic axes let the exported graph accept any batch size and
        # sequence length, not just the traced dummy-input shape.
        dynamic_axes={
            'input_ids': {0: 'batch_size', 1: 'sequence_length'},
            'attention_mask': {0: 'batch_size', 1: 'sequence_length'},
            'logits': {0: 'batch_size'}
        }
    )

    # Report the ONNX file size and the relative reduction.
    onnx_size = os.path.getsize(onnx_model_path)
    print(f"ONNX模型大小: {onnx_size / (1024*1024):.2f} MB")
    print(f"ONNX压缩率: {(1 - onnx_size/original_size)*100:.2f}%")

    # Local import: onnxruntime is only needed for this verification step.
    import onnxruntime as ort

    # Run one inference through the exported model on CPU.
    ort_session = ort.InferenceSession(onnx_model_path, providers=['CPUExecutionProvider'])
    outputs = ort_session.run(
        None,
        {
            'input_ids': inputs['input_ids'].cpu().numpy(),
            'attention_mask': inputs['attention_mask'].cpu().numpy()
        }
    )

    # outputs[0] holds the logits; argmax over all elements is safe here
    # because the dummy input is a single-example batch.
    logits = torch.tensor(outputs[0])
    predicted_class_id = logits.argmax().item()
    print(f"预测类别ID: {predicted_class_id}")

    # Map the class id to its human-readable label.
    # NOTE(review): this list must stay in sync with the label order used in
    # create_model.py — verify against the training script.
    labels = ["餐饮", "交通", "购物", "娱乐", "医疗", "教育", "生活缴费", "其他"]
    predicted_label = labels[predicted_class_id]
    print(f"预测标签: {predicted_label}")

# Script entry point: run the conversion only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    convert_to_onnx()