# -*- coding: utf-8 -*-
import torch
import os
import datetime
import numpy as np

from settings import path
# BertModel / BertTokenizer / BertConfig are exported from the top-level
# ``transformers`` package; ``transformers.models`` only contains the
# per-architecture sub-packages and does not re-export these classes.
from transformers import BertModel, BertTokenizer, BertConfig

from src import bert_train

# One label per line in the class file (blank lines become empty strings).
# ``with`` ensures the file handle is closed instead of leaking it.
with open(path.path_class_txt, encoding='utf-8') as f:
    class_list = [line.strip() for line in f]

num_classes = len(class_list)  # number of classes
num_epochs = 2  # number of training epochs
batch_size = 64  # mini-batch size
pad_size = 32  # fixed sequence length (pad short sentences, truncate long ones)
learning_rate = 5e-5  # learning rate

# NOTE(review): this name shadows what bert_model_quantization() below treats
# as a module (it calls bert_model.BertClassifier()) — the assignment here
# rebinds it to a BertModel instance. Confirm the intended reference
# (probably a project module such as src.bert_model).
bert_model = BertModel.from_pretrained(path.path_bert_base_chinese)
tokenizer = BertTokenizer.from_pretrained(path.path_bert_base_chinese)  # BERT tokenizer
bert_config = BertConfig.from_pretrained(path.path_bert_base_chinese)  # BERT model configuration
hidden_size = 768  # BERT hidden-layer size
def bert_model_quantization():
    """Dynamically quantize the trained BERT classifier to int8 and save it.

    Loads the saved classifier weights on CPU, applies PyTorch dynamic
    quantization to every ``nn.Linear`` layer, evaluates the quantized model
    on the test set, and saves the entire quantized model object to
    ``path.path_bert_model_quantify``.
    """
    print('加载数据...')
    # Only the test split is used below; train/dev loaders are unpacked but unused.
    train_dataloader, test_dataloader, dev_dataloader = bert_train.bert_dataloader()

    # Load the model
    print("加载模型...")
    device = "cpu"  # PyTorch dynamic quantization targets CPU execution
    # NOTE(review): ``bert_model`` is bound at module level to a BertModel
    # instance (BertModel.from_pretrained(...)), which has no ``BertClassifier``
    # attribute — as written this line would raise AttributeError. It likely
    # should reference a project module (e.g. ``src.bert_model``); confirm
    # against the repository layout.
    model = bert_model.BertClassifier()

    # map_location picks the device the weights are mapped onto (cpu or gpu)
    model.load_state_dict(torch.load(path.path_bert_model, map_location='cpu'))
    model.eval()  # inference mode: disables dropout etc. before quantizing

    # Quantize the BERT model.
    # A LayerNorm-only variant was tried previously and kept for reference:
    # quantized_model = torch.quantization.quantize_dynamic(model, {torch.nn.LayerNorm}, dtype=torch.qint8)
    quantized_model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
    # Print the quantized model so the per-layer parameter dtypes can be inspected
    print(quantized_model)

    # Evaluate the quantized model on the test set.
    # NOTE(review): assumes bert_train.model2dev returns
    # (report, f1, accuracy, precision) in this order — confirm.
    report, f1score, accuracy, precision = bert_train.model2dev(quantized_model, test_dataloader, device)
    print("Test Classification Report:", report)
    print("Test F1:", f1score)
    print("Test Accuracy:", accuracy)
    print("Test Precision:", precision)

    # Save the whole quantized model object (not just the state_dict)
    torch.save(quantized_model, path.path_bert_model_quantify)
    print("保存量化模型成功！地址为：", path.path_bert_model_quantify)




















