import os
import json
import numpy as np

from nlp_tools.utils import load_data_object

class BaseTfServerUtils():
    """Utilities for serving an NLP model behind TF-Serving.

    Converts raw text into the request payload TF-Serving expects and
    decodes the returned prediction tensors back into label sequences.
    """

    def __init__(self, text_processor=None, label_processor=None, model_node_names=None):
        # text_processor: tokenizes raw text (via .text_tokenizer.tokenize)
        #   and converts token sequences to tensors (via .transform).
        # label_processor: maps predicted class indices back to labels
        #   (via .inverse_transform).
        # model_node_names: dict with 'inputs' (list of graph input node
        #   names) and 'outputs' (graph output node names) — assumed from
        #   usage below; confirm against the saved model_config.json.
        self.text_processor = text_processor
        self.label_processor = label_processor
        self.model_node_names = model_node_names

    @classmethod
    def load_model(cls, model_path: str):
        """Alternate constructor: restore an instance from a saved model dir.

        Reads ``model_config.json`` under ``model_path`` and rebuilds the
        text/label processors via ``load_data_object``.
        """
        model_config_path = os.path.join(model_path, 'model_config.json')
        # Context manager closes the file even on error; the original
        # open(...).read() left the handle dangling. json.load reads the
        # stream directly instead of json.loads on the full text.
        with open(model_config_path, 'r', encoding='utf-8') as f:
            model_config = json.load(f)

        text_processor = load_data_object(model_config['text_processor'])
        label_processor = load_data_object(model_config['label_processor'])
        model_node_names = model_config['model_node_names']

        return cls(text_processor, label_processor, model_node_names)

    def get_tfserver_inputs(self, input_data):
        """Build the input payload required by tf-server from raw text.

        Args:
            input_data: raw text accepted by the text tokenizer.

        Returns:
            (tensor_dict, segment_texts) where tensor_dict is
            ``{"inputs": {node_name: tensor, ...}, "outputs": ...}`` and
            segment_texts is the tokenized input.
        """
        segment_texts = self.text_processor.text_tokenizer.tokenize(input_data)
        tensor_datas = self.text_processor.transform(segment_texts)

        # Pair each graph input node name with its tensor positionally
        # (zip truncates to the shorter sequence, like the manual loop).
        tensor_dict = {
            "inputs": dict(zip(self.model_node_names['inputs'], tensor_datas)),
            "outputs": self.model_node_names['outputs'],
        }
        return tensor_dict, segment_texts

    def get_tfserver_predict_labels(self, origin_sentences, preditions):
        """Decode raw TF-Serving prediction scores into label sequences.

        Args:
            origin_sentences: iterable of raw sentences; each is re-tokenized
                so the decoder can trim padding to the true token length.
            preditions: per-token class scores, shape (batch, seq, n_classes).
                NOTE(review): name is a typo for 'predictions', kept to
                preserve the keyword-argument interface for existing callers.

        Returns:
            Labels from label_processor.inverse_transform, one sequence per
            input sentence.
        """
        tokened_sentences = [self.text_processor.text_tokenizer.tokenize(sentence)
                             for sentence in origin_sentences]
        lengths = [len(sen) for sen in tokened_sentences]

        # argmax over the last axis -> predicted class index per token.
        pred = np.array(preditions).argmax(-1)
        return self.label_processor.inverse_transform(pred, lengths=lengths)


if __name__ == '__main__':
    # Manual smoke test: restore serving utils from a saved model directory.
    model_dir = '/home/qiufengfeng/nlp/train_models/ner_ignore/'
    server_utils = BaseTfServerUtils.load_model(model_dir)
