# encoding=utf-8
"""
@author: xiao nian
@contact: xiaonian030@163.com
@time: 2021-12-14 15:15
"""
import numpy as np
import requests
from utils.response import api_response
from fastapi import FastAPI
from kashgari.processors import load_processors_from_model
from config.config import MODEL_CONFIG, HTTP_CONFIG
import uvicorn

app = FastAPI()

# Filesystem location of the saved model: <directory>/<name>/<version>.
model_path = '/'.join(
    (MODEL_CONFIG['directory'], MODEL_CONFIG['name'], MODEL_CONFIG['version']))

# TF-Serving REST predict endpoint for this model.
predict_api = '{host}/v{version}/models/{name}:predict'.format(
    host=MODEL_CONFIG['host'],
    version=MODEL_CONFIG['version'],
    name=MODEL_CONFIG['name'])

# Load the input (text) and output (label) processors persisted with the model.
text_processor, label_processor = load_processors_from_model(model_path)


@app.get("/predict")
def predict(sentence: str):
    """Classify *sentence* via the TF-Serving predict endpoint.

    The sentence is split into characters, encoded by the saved text
    processor, sent to TF-Serving, and the argmax of the returned
    probabilities is mapped back to a label string.

    Returns an ``api_response`` payload: code 0 with ``{'label': <label>}``
    on success, code 400 with an empty label on any failure.
    """
    try:
        # Character-level segmentation.
        # NOTE(review): assumes the model was trained on char tokens — confirm.
        samples = [list(sentence)]

        # Encode the tokenized input into model-ready tensors.
        tensor = text_processor.transform(samples)

        if MODEL_CONFIG['embedding_type'] == 'bert':
            # BERT expects token ids plus segment ids; a single sentence
            # uses an all-zero segment vector of the same shape.
            instances = [{
                "Input-Token": i.tolist(),
                "Input-Segment": np.zeros(i.shape).tolist()
            } for i in tensor]
        else:
            # Generic (non-BERT) serving signature: plain token-id lists.
            instances = [i.tolist() for i in tensor]

        # Forward to the TF-Serving REST API.
        resp = requests.post(predict_api, json={"instances": instances})
        # Surface HTTP errors explicitly instead of failing later with a
        # confusing KeyError on 'predictions'.
        resp.raise_for_status()

        predictions = resp.json()['predictions']

        # argmax over the class axis -> label index -> human-readable label.
        labels = label_processor.inverse_transform(np.array(predictions).argmax(-1))

        return api_response(0, {'label': labels[0]}, 'ok')
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Any failure maps to a 400 payload.
        return api_response(400, {'label': ''}, 'error')


if __name__ == '__main__':
    print('启动程序')
    # Launch the ASGI server. The app is passed as an import string
    # ('run_http:app') so that each worker process can re-import the module;
    # this implies the file is named run_http.py — TODO confirm.
    # NOTE(review): the `debug=` keyword was removed from uvicorn.run in
    # newer uvicorn releases — confirm the pinned version still accepts it.
    uvicorn.run(
        app='run_http:app',
        host=HTTP_CONFIG['host'],
        port=HTTP_CONFIG['port'],
        workers=HTTP_CONFIG['workers'],
        reload=False,
        debug=False,
        access_log=False,
        log_level='error')
