### SMS major-category classification model
### (original comment said "Spanish-language", but module/paths use "th" (Thai) — verify which locale this serves)

import os
import pickle as pkl
from feature_set.sms.un.sms_th_loan0_v1.SmsThLoan0V1Tokenizer import SmsThLoan0V1Tokenizer
import onnxruntime as ort


def get_root_dir(path):
    """Return *path* truncated so its last component is the 'featurelib' directory.

    Raises ValueError (propagated from ``list.index``) when 'featurelib' is not
    a component of *path*.
    """
    components = path.split(os.path.sep)
    keep = components.index("featurelib") + 1
    return os.path.sep.join(components[:keep])


class SmsThLoan0V1ClfModel:
    """SMS category classifier for the sms_th_loan0_v1 feature set.

    Loads a pickled vocabulary, a class-label list and an ONNX model from the
    feature_conf directory, then exposes ``predict`` for batch classification
    of raw message strings.
    """

    def __init__(self):
        self.UNK = "<UNK>"  # fallback token for out-of-vocabulary words
        self.PAD = "<PAD>"  # padding token for short sequences
        self.tokenizer = SmsThLoan0V1Tokenizer()

        self.ROOT_DIR = get_root_dir(os.path.abspath("."))
        self.CONF_DIR = os.path.join(
            self.ROOT_DIR, "feature_conf", "sms", "un", "sms_th_loan0_v1"
        )
        self.MODEL_CONF_DIR = os.path.join(self.CONF_DIR, "model_conf")
        # Use context managers so the config file handles are closed
        # deterministically (the originals were opened and never closed).
        with open(os.path.join(self.MODEL_CONF_DIR, 'vocab.pkl'), "rb") as f:
            self.vocab = pkl.load(f)
        self.model_path = os.path.join(self.MODEL_CONF_DIR, 'model')
        with open(
            os.path.join(self.MODEL_CONF_DIR, "class.txt"), encoding="utf-8"
        ) as f:
            self.class_list = [x.strip() for x in f]
        self.session_options = ort.SessionOptions()
        self.session_options.intra_op_num_threads = 4
        # self.session_options.inter_op_num_threads = 8
        self.session = ort.InferenceSession(self.model_path, sess_options=self.session_options)

    def load_dataset(self, msg_list, pad_size=32):
        """Tokenize each message and map its tokens to vocabulary ids.

        Each message is segmented by the tokenizer, then padded with ``<PAD>``
        or truncated to exactly ``pad_size`` tokens (a falsy ``pad_size``
        disables padding/truncation). Unknown tokens map to the ``<UNK>`` id.

        :param msg_list: list of raw message strings
        :param pad_size: fixed sequence length; 0/None leaves lengths as-is
        :return: list of id sequences (list of list of int)
        """
        words_line_list = []
        for msg in msg_list:
            words_line = []
            token = self.tokenizer.seg_content(msg)
            if pad_size:
                if len(token) < pad_size:
                    token.extend([self.PAD] * (pad_size - len(token)))
                else:
                    token = token[:pad_size]
            for word in token:
                words_line.append(self.vocab.get(word, self.vocab.get(self.UNK)))
            words_line_list.append(words_line)
        return words_line_list

    def predict(self, text_list):
        """Classify a batch of messages, returning one class label per message.

        :param text_list: list of raw message strings (may be empty)
        :return: list of class-label strings from ``class_list``
        """
        if not text_list:
            return []
        x = self.load_dataset(text_list)
        # NOTE(review): assumes the model's first output is already a vector of
        # class indices (argmax applied inside the graph) — confirm against the
        # exported ONNX model.
        outputs = self.session.run(None, {'input_ids': x})[0]
        preds = [self.class_list[i] for i in outputs]
        return preds