#! -*- coding: utf-8 -*-
"""
@Author: AI
@Create Time: 20240625
@Info: NER API service (HTTP interface)
"""
import json
import logging
import os
import time
from logging.handlers import RotatingFileHandler

import tornado.ioloop
import tornado.web
from tornado.httpserver import HTTPServer

from inference import Inference
WEB_PORT = 8892

# Load the NER model once at import time; all requests share this instance.
predictor = Inference()
print('模型加载成功.....')

# File logger recording web requests and results, for debugging and tracing.
# Create the log directory first: RotatingFileHandler raises FileNotFoundError
# at startup if 'log/' does not exist.
os.makedirs('log', exist_ok=True)
file_handler = RotatingFileHandler('log/ner.log', maxBytes=20 * 1024 * 1024, backupCount=30)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger = logging.getLogger('ner')
logger.setLevel(logging.INFO)
logger.addHandler(file_handler)


class MainHandler(tornado.web.RequestHandler):
    """NER endpoint: POST a JSON body with `query` (text to tag) and an
    optional `operate_type`; responds with `{"entities": <model output>}`.
    """

    def data_received(self, chunk):
        # Streaming request bodies are not used by this endpoint.
        pass

    def post(self, *args, **kwargs):
        try:
            request_json = json.loads(self.request.body)
        except (ValueError, UnicodeDecodeError):
            # Malformed body: answer explicitly instead of an empty 200.
            logger.warning('invalid request body: %r', self.request.body)
            self.set_status(400)
            self.write({'entities': [], 'error': 'invalid json body'})
            return

        sentence = request_json.get('query', '')
        operate_type = request_json.get('operate_type', '')
        logger.info('request: %s', request_json)

        if not sentence:
            # Empty query: return an explicit empty result rather than no body.
            self.write({'entities': []})
            return

        try:
            time_start = time.time()
            result = predictor.predict_batch(sentence, operate_type)
            time_end = time.time()
            logger.info('predict cost: %dms', int((time_end - time_start) * 1000))
            logger.info('result: %s', result)
            self.write({'entities': result})
        except Exception:
            # The original swallowed errors into the unconfigured root logger
            # and returned an empty 200; log the traceback and signal failure.
            logger.exception('prediction failed for query: %s', sentence)
            self.set_status(500)
            self.write({'entities': [], 'error': 'internal error'})


# URL routing table: the NER service is exposed at /ner.
routes = [
    ("/ner", MainHandler),
]
application = tornado.web.Application(routes)

if __name__ == '__main__':
    # Serve the application on WEB_PORT with a single worker process
    # (the model is loaded in-process, so forking extra workers would
    # require reloading it per process).
    http_server = HTTPServer(application)
    http_server.bind(WEB_PORT)
    http_server.start(num_processes=1)
    print(f"server is running on {WEB_PORT}")
    tornado.ioloop.IOLoop.current().start()
