# -*- coding: utf-8 -*-
# Program function: BERT text-classification inference — loads the full-precision
# and the dynamically-quantized model once, then compares their predictions and latency.
import time
import torch
from config import *
from b_model_train import MyBertModel,bert_eval
from a_data_pro import get_loader

def init_model(model_path):
    """Build a MyBertModel and load trained weights from ``model_path``.

    If ``model_path`` is the quantized checkpoint, the model is dynamically
    quantized (Linear layers -> qint8) *before* loading, so the state-dict
    keys match the quantized module structure.

    Returns the model in eval mode, ready for inference.
    """
    model = MyBertModel()
    if model_path == quantize_bert_model_path:
        # Quantize first: the saved state dict was produced from a
        # dynamically-quantized model, so the module layout must match.
        model = torch.quantization.quantize_dynamic(
            model, {torch.nn.Linear}, dtype=torch.qint8
        )
    # map_location='cpu' lets a checkpoint saved on GPU load on CPU-only hosts.
    model.load_state_dict(torch.load(model_path, map_location='cpu'))
    # eval() disables dropout etc. so inference is deterministic.
    model.eval()
    return model

# Load both models once at import time so every prediction call reuses them
# instead of re-reading the checkpoints from disk per request.
model = init_model(bert_model_path)
model2 = init_model(quantize_bert_model_path)

def bert_pre_model(text, model_path):
    """Classify a single text with one of the pre-loaded BERT models.

    Args:
        text: raw input string to classify.
        model_path: checkpoint selector — the quantized model is used when
            this equals ``quantize_bert_model_path``, otherwise the
            full-precision one.

    Returns:
        The predicted class label (a value from ``num2class``).
    """
    encoded = bert_tokenizer.batch_encode_plus(
        [text],
        max_length=max_len,
        padding='max_length',
        truncation=True,
        return_tensors="pt",
    )
    input_ids = encoded["input_ids"]
    attention_mask = encoded["attention_mask"]
    # Pick the model loaded at module import; both branches share the
    # same argmax decoding, so select the model rather than duplicating it.
    chosen = model2 if model_path == quantize_bert_model_path else model
    # no_grad: inference only — skip autograd bookkeeping and save memory.
    with torch.no_grad():
        logits = chosen(input_ids, attention_mask)
    return num2class[torch.argmax(logits, dim=-1).item()]

if __name__ == '__main__':
    # Smoke test: classify one sample with each model and report latency.
    data_dict = {'text': '状元心经：考前一周重点是回顾和整理'}
    # perf_counter is monotonic and high-resolution, unlike time.time(),
    # which can jump when the system clock is adjusted.
    start = time.perf_counter()
    print(bert_pre_model(data_dict['text'], bert_model_path))
    end_time = time.perf_counter()
    print(bert_pre_model(data_dict['text'], quantize_bert_model_path))
    print('bert_time:', end_time - start, 'quantize_bert_time:', time.perf_counter() - end_time)

# Model file-extension conventions used in this project:
#   BERT     -> .pt
#   fastText -> .bin
#   rd_model -> .pkl
# By convention, classic machine-learning models are saved as .pkl, large
# language models as .pt, and the fastText docs recommend .bin.