import concurrent.futures
import json
import logging
import time
import traceback
from logging.handlers import RotatingFileHandler

import tornado.ioloop
# tornado.web must be imported explicitly: importing tornado.ioloop does not
# make tornado.web available as an attribute, yet MainHandler and the
# Application below reference tornado.web.
import tornado.web
from tornado.httpserver import HTTPServer

import model_future as modelUtils
from config import OPERATE_MODEL_CONFIG, answerMapping, NER_SCHEMA, itemMapping, taskStatusMapping, \
    workOrderStatusMapping
from date_standard import date_standard

# Load the vLLM base models, their LoRA adapters and the shared tokenizer once
# at import time so every request reuses the same in-memory models.
llm, llm_ner, loRA_classify_lora, loRA_ner_lora, tokenizer = modelUtils.load_vllm_model()
sampling_params = modelUtils.get_sampling_params()

# Rotating file logger: 20 MB per file, up to 30 backup files, under log/.
# NOTE(review): the 'log' directory must already exist — RotatingFileHandler
# does not create it.
file_handler = RotatingFileHandler('log/classify.log', maxBytes=20 * 1024 * 1024, backupCount=30)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger = logging.getLogger('classify')
logger.setLevel(logging.INFO)
logger.addHandler(file_handler)


def get_intent_id_by_schema(schema_value):
    """Return the intentId of the first NER_SCHEMA entry whose 'schema'
    collection contains *schema_value*, or None when nothing matches."""
    matches = (entry['intentId'] for entry in NER_SCHEMA if schema_value in entry['schema'])
    return next(matches, None)


category_names = ''
# Load the category (intent) label config and serialize the label texts into
# a JSON array string that is embedded in the classification prompt.
with open("category/label_config.json", "r") as fileCategory:
    data = json.load(fileCategory)
    category_names = json.dumps([item['text'] for item in data], ensure_ascii=False)

def predictCategory(query):
    """Classify *query* into one of the configured category labels.

    Builds a chat prompt from the fixed classification instruction, the user
    query and the candidate label list, runs the classification LoRA on the
    base LLM, and returns the raw generated text (the predicted label).

    :param query: user utterance to classify.
    :return: raw model output string.
    """
    instruction = "你是一个文本分类领域的专家，你会接收到一段文本和几个潜在的分类选项，请输出文本内容的正确类型,例如 任务编号"
    # BUG FIX: category_names is already a JSON array string (built with
    # json.dumps at load time); dumping it again double-encoded it and filled
    # the prompt with escaped quotes. Use it as-is.
    input_value = "文本:" + query + ",类型选型:" + category_names

    messages = [
        {"role": "system", "content": instruction},
        {"role": "user", "content": input_value}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    time_start = time.time()
    outputs = llm.generate([text], sampling_params, lora_request=loRA_classify_lora)
    # Capture the end time right after generation so the logged latency
    # measures only the model call.
    time_end = time.time()
    result = outputs[0].outputs[0].text
    print('predictCategory-model耗时：{}ms, result: {}'.format(int((time_end - time_start) * 1000), result))
    return result

span_names = ''
# Load the span (entity) label config and join the label texts into a
# '; '-separated string that is embedded in the NER prompt.
with open("span/label_config.json", "r") as fileSpan:
    data = json.load(fileSpan)
    span_names = "; ".join([item['text'] for item in data])


def predictNER(input_text):
    """Run named-entity extraction on *input_text* with the NER LoRA model.

    Returns the raw generated text: per the prompt this is either a JSON
    array of {"entity_text", "entity_label"} objects or the literal string
    "没有找到任何实体" when nothing is recognized.

    :param input_text: sentence to extract entities from.
    :return: raw model output string.
    """
    system_prompt = (
        "你是一个文本实体识别领域的专家，你需要从给定的句子中提取 "
        + span_names
        + """ 实体. 以 json数组 格式输出, 如 [{"entity_text": "TASK202400000501", "entity_label": "任务编号"}, {"entity_text": "任务表", "entity_label": "表名"}] 注意: 1. 输出的每一行都必须是正确的 带双引号的 json数组 字符串 . 2. 找不到任何实体时, 输出"没有找到任何实体"."""
    )
    user_prompt = f"文本:{input_text}"

    chat = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    prompt_text = tokenizer.apply_chat_template(
        chat,
        tokenize=False,
        add_generation_prompt=True
    )

    started = time.time()
    outputs = llm_ner.generate([prompt_text], sampling_params, lora_request=loRA_ner_lora)
    elapsed_ms = int((time.time() - started) * 1000)

    result = outputs[0].outputs[0].text
    print('predictNER-model耗时：{}ms, result: {}'.format(elapsed_ms, result))
    return result


class MainHandler(tornado.web.RequestHandler):
    """POST /classify: classify the query's intent and extract entities,
    returning the matched intent, its answer template and normalized
    keyword filters."""

    def data_received(self, chunk):
        # Streamed request bodies are not used by this handler.
        pass

    def post(self, *args, **kwargs):
        try:
            request_json = json.loads(self.request.body)
            query = request_json.get('query', '')
            logger.info('#' * 50)
            logger.info(f'query: {query}')
            print(f'query: {query}')
            # Run the two model calls concurrently; each uses its own LoRA
            # adapter on its own base model.
            with concurrent.futures.ThreadPoolExecutor() as executor:
                future_category = executor.submit(predictCategory, query)
                future_ner = executor.submit(predictNER, query)
                category = future_category.result()
                keywords_json = future_ner.result()

            # BUG FIX: keywords was previously defined only inside the
            # `category != '未找到'` branch, so a '未找到' classification
            # raised NameError when building the response.
            keywords = []
            if category != '未找到':
                for item in json.loads(keywords_json):
                    key = item['entity_label']
                    value = item['entity_text']
                    # Map human-readable status labels to their codes.
                    if key == 'TASK状态':
                        value = taskStatusMapping[value]
                    if key == 'MO状态':
                        value = workOrderStatusMapping[value]
                    if ('时间' in key) and (key != 'MO延期时间') and (key != 'TSK延期时间'):
                        # Normalize a time expression into a [left, right] range.
                        left, right = date_standard(value)
                        base = itemMapping[key]
                        # BUG FIX: the original assigned string keys into the
                        # *list* (`keywords[base + 'Left'] = ...` → TypeError)
                        # and misspelled 'Right' as 'Reft'. Append a dict,
                        # consistent with the else-branch.
                        keywords.append({base + 'Left': left, base + 'Right': right})
                    else:
                        keywords.append({itemMapping[key]: value})

            # NOTE(review): when category is '未找到', intentId is None and
            # answerMapping[None] must exist — confirm against config.
            intentId = get_intent_id_by_schema(category)
            result = {
                'code': 200,
                'msg': 'success',
                'data': {
                    'answer': answerMapping[intentId],
                    'intentId': intentId,
                    'keywords': keywords
                }
            }
            logger.info(f'response: {result}')
            self.write(result)

        except Exception as e:
            print(traceback.format_exc())
            logger.error(traceback.format_exc())
            # BUG FIX: str(e) — the Exception instance itself is not
            # JSON-serializable by tornado's write().
            self.write({
                'code': 400,
                'msg': str(e),
                'data': {}
            })


# URL routing table for the classification service.
_ROUTES = [
    ("/classify", MainHandler),
]
application = tornado.web.Application(_ROUTES)

if __name__ == '__main__':
    # Single-process HTTP server on the configured port; the vLLM models are
    # loaded once at module import and shared by all requests.
    web_port = OPERATE_MODEL_CONFIG['WEB_PORT']
    server = HTTPServer(application)
    server.bind(web_port)
    server.start(num_processes=1)
    print("server is running on {}".format(web_port))
    tornado.ioloop.IOLoop.current().start()
