import os
import torch
from bert_seq2seq import Tokenizer
from bert_seq2seq import load_model
from bert_seq2seq import Predictor

# --- Model / task configuration ---
model_name = "roberta" # model architecture name expected by load_model
task_name = "classification"
vocab_path = "../../../large-model-data/roberta_bnuask/vocab.txt" # path to the RoBERTa vocab file
model_save_path = "../../../large-model-data/roberta_bnuask/roberta_bnuask_cls.bin"

# Build the tokenizer from the vocab file and pick GPU when available.
tokenizer = Tokenizer(vocab_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Classification labels: administrative departments (the classifier output index
# selects one of these names; order must match the trained model's label order).
target = ["120周年校庆工作办公室", "保卫处", "财经处", "昌平校区综合管理办公室", "党委办公室/校长办公室", "党委教师工作部", "党委统战部", "党委宣传部", "党委学生工作部", "党委巡视工作办公室", "党委组织部", "党校办", "档案馆", "发展规划处", "国际交流与合作处", "国有资产管理处", "机关党委", "基础教育发展管理部", "纪检监察处", "教务部（研究生院）", "科研院", "离退休处", "内控办", "培训与基础教育管理处", "人才人事处", "审计处", "实验室安全与设备管理处", "图书馆", "校工会", "校团委", "校医院", "校友总会", "信息网络中心", "珠海园区领导小组办公室", "总务部", "总务长办公室（后勤管理处）"]

def main():
    """Load the fine-tuned classifier and print the predicted department
    label for a few sample user messages.

    Relies on the module-level ``tokenizer``, ``model_name``, ``task_name``,
    ``model_save_path``, ``device`` and ``target`` configuration.
    """
    # Build the model with one output per label and restore trained weights.
    model = load_model(
        tokenizer.vocab,
        model_name=model_name,
        task_name=task_name,
        target_size=len(target),
    )
    model.load_all_params(model_save_path, device)
    predictor = Predictor(model, tokenizer)

    samples = [
        "老师您好，我想反馈下游泳馆开放时间的问题",
        "食堂的菜比较难吃啊，特别是教工食堂",
        "为什么我得等半个月才可以使用财经平台",
    ]

    for sample in samples:
        # Scores are assumed to be a 1-D tensor of per-label logits — the
        # argmax index selects the predicted department name.
        best = predictor.predict_cls_classifier(sample).argmax(dim=0)
        print(target[best])

# Run inference only when executed as a script (not on import).
if __name__ == '__main__':
    main()
