import re

import requests
from tornado import httputil
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.gen
import argparse
import json
# from onnx_model import ONNXModel
import traceback
import random

from tornado.web import Application
import torch
# from .load_model import mae_model, mae_cls, mae_device

# load bot
# from colorama import Fore, Style
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig
from search_zhengzhuang import get_augument_content
from tqdm import  tqdm
import os
import logging

def get_logger(log_file_path=None, info_level=logging.DEBUG, console_out=False):
  """Create and configure this module's logger.

  Args:
    log_file_path: optional UTF-8 log file path; missing parent directories
      are created on demand.
    info_level: level applied to the logger and every handler it gets.
    console_out: when True, also attach a StreamHandler.

  Returns:
    The configured ``logging.Logger`` for this module.
  """
  formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  logger = logging.getLogger(__name__)
  logger.setLevel(level=info_level)

  if console_out:
    console = logging.StreamHandler()
    console.setLevel(info_level)
    console.setFormatter(formatter)
    logger.addHandler(console)

  if log_file_path:
    log_dir = os.path.dirname(log_file_path)
    # os.makedirs('') raises FileNotFoundError, so only create a directory
    # when the path actually contains one; exist_ok avoids the race between
    # an exists() check and the creation call.
    if log_dir:
      os.makedirs(log_dir, exist_ok=True)
    handler = logging.FileHandler(log_file_path, encoding="utf-8")
    handler.setLevel(info_level)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
  return logger

# Command-line arguments: only the HTTP port the Tornado server listens on.
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=8019, help="服务端口")
args = parser.parse_args()

# Local path of the Baichuan2-13B-Chat weights loaded by init_model().
model_path =  "/home/centos/ll/llms/weights/Baichuan2-13B-Chat/"
# Remote service used by GenerateMedicalRecordHandler to produce records.
url_medical_record = 'http://192.168.17.189:9121/test'
logger = get_logger('./wenzhen.log')

# model_path = "/home/test/mjm_data/Baichuan2-13B-Chat/"

def readjson(file_path):
  """Load and return the JSON content of *file_path* (UTF-8)."""
  # make_path_legal(file_path)
  with open(file_path, "r", encoding="utf-8") as f:
    return json.load(f)

# @st.cache_resource
def init_model():
    """Load the Baichuan2 chat model and its tokenizer from ``model_path``.

    Returns:
        (model, tokenizer): the fp16 causal LM placed with device_map="auto"
        and the matching slow tokenizer, both with remote code trusted.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_path,
        use_fast=False,
        trust_remote_code=True,
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )
    model.generation_config = GenerationConfig.from_pretrained(model_path)
    return model, tokenizer

class Application(tornado.web.Application):
    """Thin wrapper expanding a settings dict into keyword arguments.

    NOTE(review): this class shadows the ``Application`` imported from
    ``tornado.web`` at the top of the file.
    """

    def __init__(self, handlers, settings):
        super().__init__(handlers, **settings)


def get_args(args, key):
   """Return the decoded value(s) for *key* from a Tornado-style args dict.

   Values are lists of byte strings. A missing or empty key yields '',
   a single value is unwrapped to a plain string, and multiple values
   come back as a list of strings.
   """
   if key not in args:
      return ''
   decoded = [raw.decode() for raw in args.get(key, [])]
   if not decoded:
      return ''
   if len(decoded) == 1:
      return decoded[0]
   return decoded

def readjson(file_path):
  """Load and return JSON content from *file_path* (UTF-8).

  NOTE(review): exact duplicate of the readjson() defined earlier in this
  file; this later definition shadows the first at import time. Consider
  deleting one of the two.
  """
#   make_path_legal(file_path)
  with open(file_path, "r", encoding="utf-8") as f:
    return json.load(f)

# ======================================= prompt ===================================

def get_history_prompt(history, question, history_nums=5):
   """Render the last *history_nums* (doctor, patient) turns plus the new
   doctor *question* as numbered "[Round i]" lines."""
   recent = history[-1 * history_nums:]
   parts = [
      "[Round {}]\n医生：{}\n患者：{}\n".format(idx, doctor_turn, patient_turn)
      for idx, (doctor_turn, patient_turn) in enumerate(recent)
   ]
   parts.append(f'[Round {len(recent)}]\n医生：{question}。')
   return ''.join(parts)


def get_all_history_prompt(history, max_len=4096):
    """Flatten a conversation *history* into a "role：text" transcript.

    Supports three history layouts:
      * list[str]        — turns alternate 医生/患者 starting with 医生;
      * list[tuple|list] — explicit (role, text) pairs, int roles mapped to
                           医生 (0) / 患者 (1);
      * list[dict]       — {role: text} mappings; consecutive texts of the
                           same role are joined on one line.

    Turns are appended until adding the next one would push the transcript
    past *max_len* characters.

    Returns:
        The transcript string; '' for an empty history.
    """
    if not history:
        # Guard: the branches below all inspect history[0].
        return ''
    conv = ''
    if isinstance(history[0], str):
        for i, value in enumerate(history):
            role = '医生' if i % 2 == 0 else '患者'
            cur = f'{role}：{value}\n'
            if len(conv) + len(cur) > max_len:
                break
            conv += cur
    elif isinstance(history[0], (tuple, list)):
        for role, value in history:
            if isinstance(role, int):
                role = '医生' if role == 0 else '患者'
            cur = f'{role}：{value}\n'
            if len(conv) + len(cur) > max_len:
                break
            conv += cur
    elif isinstance(history[0], dict):
        last_role = ''
        truncated = False
        for his in history:
            for role, text in his.items():
                if len(f'{role}：{text}\n') + len(conv) > max_len:
                    # Stop building the whole transcript; the original inner
                    # `break` only exited this dict and kept appending later
                    # entries past the budget.
                    truncated = True
                    break
                if last_role != role:
                    conv += f'\n{role}：{text}' if conv else f'{role}：{text}'
                    last_role = role
                else:
                    conv += f'{text}'
            if truncated:
                break
        conv += '\n'
    return conv
def get_prompt_symptoms_tuijian(symptoms, keshi, history, question):
   """Build a prompt asking the LLM to pick 5 symptoms for department *keshi*.

   Samples up to 20 candidate symptoms for the department and appends the
   rendered doctor/patient dialogue plus the candidate set after '####'.

   Bug fixed: random.shuffle() works in place, and the list comes straight
   out of the shared *symptoms* mapping — the original shuffled the caller's
   data; a copy is shuffled now.
   """
   sub_symptoms = list(symptoms.get(keshi, []))
   random.shuffle(sub_symptoms)
   sub_symptoms = f'{",".join(sub_symptoms[:20])}'
   prompt_history = get_all_history_prompt(history + [question])
   prompt = f"请根据医生患者的对话，从####后给出的症状集合中选择 5 个最相关症状，并以 json 数组格式化输出。Json 格式为：['症状1', '症状2', ...]。"
   prompt += f"医生患者对话：《{prompt_history}》"
   prompt += '####{' + sub_symptoms + '}'
   return prompt

def get_prompt_symptoms_tuijian_only_llms(history, question):
    """Build the LLM-only symptom-suggestion prompt (no candidate set):
    extract current symptoms from the dialogue, infer other likely ones,
    and return them as JSON."""
    dialogue = get_all_history_prompt(history + [question])
    return f"""作为专业医生助理，根据《》中的医生患者问诊对话内容，执行以下操作：
1-根据医生患者对话内容，提取患者当前症状。
2-根据患者当前症状，推测患者可能的其它症状。
3-使用 json 格式化步骤 2 的结果，并返回。

医生患者问诊对话：《{dialogue}》
"""

def get_prompt_extract_symptoms(text):
   """Build a prompt asking the LLM to pull symptom entities out of *text* as JSON."""
   return f"给你一段文本，从中提取出症状相关的实体。以 json 格式化输出。Json 格式为：['症状1', '症状2', ...]。文本为：{text}"

def get_prompt_extract_symptoms_with_history(history, text):
   """Like get_prompt_extract_symptoms(), but over the whole rendered dialogue."""
   dialogue = get_all_history_prompt(history + [text])
   return f"请根据医生患者对话内容，从中提取出症状相关的实体。以 json 格式化输出。json 格式为：['症状1', '症状2', ...]。医生患者对话内容为：《{dialogue}》"

def get_prompt_bodypart_tuijian(body_parts, history, question):
   """Build a prompt asking for the 5 most relevant body-part names.

   When *body_parts* is non-empty the model must choose from that set
   (appended after '####'); otherwise it may answer freely.

   Note: *history* and *question* are currently unused (the original
   computed a dialogue transcript and then never used it — the dead call
   has been removed); the parameters are kept for interface stability
   with the other get_prompt_* helpers.
   """
   if body_parts:
      prompt = f"请根据患者的对话问题，从####后给出的身体部位名称集合中，选择 5 个最相关身体部位名称，并json 数组格式化输出。json 格式为：['身体部位 1', '身体部位 2', ...]。"
      prompt += '####{' + ','.join(body_parts) + '}'
   else:
      prompt = f"请根据患者的对话问题，选择 5 个最相关身体部位名称，并 json 数组格式化输出。json 格式为：['身体部位 1', '身体部位 2', ...]。"

   return prompt


def get_prompt_medicine_tuijian(history, question):
   """Build a prompt asking the LLM to recommend 5 drugs from the dialogue."""
   dialogue = get_all_history_prompt(history + [question])
   return f"请根据医生患者的对话内容，选择 5 个最相关药品进行推荐，并以 json 格式化输出。json 格式为：['药品1', '药品2', ...]。医生患者对话内容：《{dialogue}》"


def get_prompt_medicine_tuijian_only_llms(history, question):
   """Build the stepwise LLM prompt: detect a medication question, extract
   current symptoms, then guess drugs the patient may have used (JSON)."""
   dialogue = get_all_history_prompt(history + [question])
   return f"""作为专业医生助理，根据《》中的医生患者问诊对话内容，执行以下操作：
1-判断医生是否在询问患者使用了何种药物，输出结果：是或否
2-如果步骤 1 输出结果为：是，请根据医生患者对话内容，提取患者当前症状。
3-根据步骤 2 提取的患者当前症状，猜测患者可能使用过的药物名称。
4-使用 json 格式化步骤 3 的结果，并返回。

医生患者问诊对话：《{dialogue}》
"""

def get_prompt_allergy_tuijian(history, question):
   """Build a prompt asking the LLM to suggest 5 likely allergy descriptions."""
   dialogue = get_all_history_prompt(history + [question])
   return f"请根据医生患者的对话内容，选择 5 个最相关，过敏情况进行推荐，并以 json 格式化输出。json 格式为：['过敏情况1', '过敏情况2', ...]。医生患者对话内容：《{dialogue}》"

def get_prompt_allergy_only_llms(history, question):
   """Build the stepwise LLM prompt: detect an allergy question, extract
   symptoms, then guess which allergy could explain them (JSON)."""
   dialogue = get_all_history_prompt(history + [question])
   return f"""作为专业医生助理，根据《》中的医生患者问诊对话内容，执行以下操作：
1-判断医生是否在询问患者的过敏情况，输出结果：是或否。
2-如果步骤 1 输出结果为：是，请根据医生患者对话内容，提取患者当前症状。
3-根据步骤 2 提取的患者当前症状，尝试猜测可能由何种过敏导致的这些症状。
4-使用 json 格式化步骤 3 的结果，并返回。

医生患者问诊对话：《{dialogue}》
"""
def get_prompt_allergy_tuijian_symptoms(symptoms):
   """Build a prompt asking for 5 likely allergens given symptom strings."""
   joined = ','.join(symptoms)
   return f"请根据症状，选择 5 个最相关，过敏物进行推荐，并以 json 格式化输出。json 格式为：['过敏物 1', '过敏物 2', ...]。症状：《{joined}》"

def get_prompt_intention_tuijian(history, question):
   """Build the intent-classification prompt for the latest doctor question.

   Only the current *question* is used; earlier turns are deliberately
   ignored (the multi-turn variant was commented out in the original).

   Bug fixed: the original f-string parsed {'intent_type':'','推荐关键字':''}
   as a replacement field with an invalid format spec, raising ValueError on
   every call. The braces are now emitted literally.
   """
   conv = f'[Round 0]\n医生：{question}。'
   prompt = (
      f"请根据医生患者的对话内容：{conv}，给出一个最相关的用户意图类型，直接用json格式化输出，"
      "json 格式为{'intent_type':'','推荐关键字':''}，意图类型包括"
      "{症状推荐，药品推荐，过敏情况推荐，症状部位推荐，其它} "
   )
   return prompt


def get_all_symptoms(file='./datas/all_symptoms.json'):
   """Collect every symptom string from the nested all_symptoms JSON file.

   The file maps top-level keys to {name: {'all_symptoms': [...]}} entries;
   all 'all_symptoms' lists are concatenated in file order.
   """
   data = readjson(file)
   collected = []
   for item in data.values():
      for entry in item.values():
         collected.extend(entry['all_symptoms'])
   return collected

def get_generate_medical_record_prompt(history):
    """Build the (local) medical-record generation prompt.

    Combines the fixed record template and field explanations with as much
    of the rendered dialogue as fits into a 4096-character budget.

    NOTE(review): largely duplicated by get_medical_prompt() below, which is
    the version GenerateMedicalRecordHandler actually sends to the remote
    record service.
    """
    prompt = '给定医生和患者的对话内容，请生成一份患者的问诊的信息报告，' \
             '包括{主诉，现病史，过敏史，个人史，婚育史，家族史，检查检验结果}。如果没有则写“无”,并用严格用json格式输出。' \
             "json格式为：{'主诉':''，'现病史':''，'过敏史':''，'个人史':''，'婚育史':''，'家族史':''，'检查检验结果':''}" + "\n" \
             "关键信息解释：" \
             "主诉通常是指患者在就医时所描述的他们的症状、不适或健康问题。这是患者在对医生进行初诊时提出的问题，是他们希望解决的主要健康问题。在这个情景下，患者的主诉是头疼和发热。" \
             "；现病史指的是患者当前正在经历的疾病情况的描述。它包括症状的起始时间、症状的性质、症状的持续时间以及可能的伴随症状。" \
             "；过敏史指的是患者对特定物质或环境因素产生过敏反应的历史。这些过敏原可以包括食物、药物、花粉、宠物毛发等。在这个情景下，患者提到他有花粉过敏史，表示他对花粉过敏。" \
             "；个人史包括有关患者个人生活和健康的历史信息，通常包括个人的生活习惯和行为，如饮酒、吸烟、锻炼、饮食习惯等。在这个情景下，患者没有提到具体的个人史，因此标注为'无'。" \
             "；婚育史指的是有关患者与婚姻、生育和家庭计划相关的历史和信息。包括婚姻状态、婚姻历史、生育历史等。在这个情景下，患者没有提到具体的婚育史，因此标注为'无'。" \
             "；家族史指的是有关患者直系亲属和近亲属的健康历史信息，具体包括家庭成员的健康状况。在这个情景下，患者没有提到具体的家族史，因此标注为'无'。" \
             "；检查检验结果指的是患者的检查和检验结果，包括各种医学测试和检查所得到的数据，用于评估患者的生理状况、疾病诊断和治疗进展。在这个情景下，患者表示暂时没有进行过体检或其他相关的检查，因此标注为'暂时没有'。" \
             "\n" \
             '医生和患者的对话内容为：'
    # Trim the dialogue so template + transcript stays within 4096 chars.
    history_prompt = get_all_history_prompt(history, max_len=4096 - len(prompt))

    prompt += history_prompt
    return prompt

# ======================== Initialize module-level state ========================

model, tokenizer = init_model()
# Department name -> common symptoms, used for the cold-start recommendation.
symptoms = readjson('./datas/all_comm_symptoms.json')
symptoms_all = get_all_symptoms('./datas/all_symptoms.json')
bodypart_symptoms_mapping = readjson('./datas/symptoms_body_mapping.json')
bodypart_mapping = readjson('./datas/bodypart_mapping.json')
symptoms_bodypart_mapping = readjson('./datas/symptoms_bodypart_mapping.json')
first_problems_keshi = readjson('./datas/problems_keshi.json')
# Last prompts sent; used to perturb a repeated identical prompt with '\n'.
cache_prompt = ''
cache_prompt_generate_record = ''
# Department-name synonyms (incoming name -> canonical name).
keshi_mapping = {"普通外科": "普外科",
                 "骨科": "骨外科",
                 "胸外科": "心胸外科",
                 "烧伤重建外科": "烧伤科",
                 "整形美容外科": "整形美容科",
                 "消化科": "消化内科",
                 "口腔医学中心": "口腔科",
                 "耳鼻咽喉科": "耳鼻喉科"}

# Matches every run of characters that is NOT a CJK unified ideograph.
_NOT_CHINESE_RE = re.compile('[^\u4e00-\u9fa5]+')


def remove_not_chines(v):
    """Strip all non-Chinese characters from a string or list of strings.

    Returns the cleaned string (possibly ''), or for list/tuple input a
    list of cleaned entries with empties dropped. Unsupported types return
    None, mirroring the original fall-through.

    Note: the function name keeps its historical spelling because callers
    throughout this file use it.
    """
    if isinstance(v, str):
        # The original's `if not v: v = ''` was dead code — re.sub already
        # returns '' when nothing survives.
        return _NOT_CHINESE_RE.sub('', v)
    if isinstance(v, (tuple, list)):
        cleaned = [_NOT_CHINESE_RE.sub('', item) for item in v]
        return [item for item in cleaned if item]
    return None


def handle_value(data):
    """Recursively normalise a parsed LLM value into Chinese-only strings.

    Lists of lists are flattened one level and re-processed; dict values are
    collected and cleaned. The final cleanup always goes through
    remove_not_chines(), so the result is a cleaned str, a list of cleaned
    strings, or None for unsupported types.

    NOTE(review): the list-of-dicts branch feeds dict elements into
    handle_array_res(), which expects a JSON *string* — confirm this path is
    actually reachable/intended.
    """
    if isinstance(data, (tuple, list)):
        if data:
            if isinstance(data[0], (tuple, list)):
                # Flatten one level of nesting, then re-process the result.
                tmp = []
                for v in data:
                    tmp += list(v)
                data = handle_value(tmp)
            elif isinstance(data[0], dict):
                data = [handle_array_res(v) for v in data]
                data = handle_array_res(data)
        return remove_not_chines(data)
    elif isinstance(data, dict):
        values = []
        for k, vs in data.items():
            if isinstance(vs, (tuple, list)):
                values.extend(list(vs))
            elif isinstance(vs, str):
                values.append(vs)
            elif isinstance(vs, dict):
                values.append(handle_value(vs))
            else:
                # Non-container, non-string value (e.g. a number): the KEY is
                # kept instead of the value — intentional in the original.
                values.append(k)
        values = [handle_value(v) for v in values]

        return remove_not_chines(values)
    elif isinstance(data, (str, int, float)):
        return remove_not_chines(str(data))
    else:
        # Unsupported type: log and implicitly return None.
        print(f'unsupported datatype: {data}')

def handle_array_res(res):
    """Parse an LLM reply that should be a JSON array of Chinese terms.

    Order of attempts:
      1. strict json.loads;
      2. a bracketed "['a', 'b']"-style fragment anywhere in the text;
      3. numbered "1. term:" / "2、term：" lines.
    The parsed value is then normalised through handle_value().

    Fixes vs. original: bare ``except:`` narrowed to ``except Exception``
    (no longer swallows KeyboardInterrupt/SystemExit) and the regex
    patterns are raw strings (the originals relied on invalid escape
    sequences such as '\\[', which emit SyntaxWarning on modern Python).
    """
    data = []
    try:
        data = json.loads(res)
        data = handle_value(data)
    except Exception:
        # Not valid JSON — fall back to regex extraction.
        # Case 1: a literal list like "['s', 'x']".
        reg = r'\[(?P<v>[^\[\]]+)\]'
        search = re.search(reg, res)
        if search:
            data = search.group('v')
            data = [v.strip("'\"\n ") for v in data.split(',')]
        else:
            # Case 2: numbered lines such as "1. xx:" / "2、xx：".
            reg = r'\d+[\.、\s]*(?P<v>[^\s:：]+)[:：]'
            search = re.finditer(reg, res)
            data = [v.group('v') for v in search if v]
        data = handle_value(data)
    return data


def handle_dict_res(res):
    """Parse *res* as JSON, returning {} on failure.

    NOTE(review): dead code — a second ``handle_dict_res`` defined further
    down in this file shadows this definition at import time, so only the
    later (quote-normalising) version ever runs. Consider deleting this one.
    """
    data = []
    try:
        data = json.loads(res)
        # data = handle_value(data)
    except:
        data = {}
    return data

def chat(prompt, messages_add=None, decode_json=False, default_res=None):
   """Send *prompt* to the global Baichuan model and collect the streamed reply.

   Args:
      prompt: user message appended after *messages_add*.
      messages_add: optional list of prior chat messages (role/content dicts).
      decode_json: when True, json.loads() the complete reply.
      default_res: value returned when generation or JSON decoding fails
         (defaults to a fresh empty dict).

   Returns:
      The reply text (or its decoded JSON), else *default_res*.
   """
   # None-sentinels replace the original mutable defaults ([] / {}), which
   # were shared across calls; the returned default could be mutated by one
   # caller and leak into the next.
   if messages_add is None:
      messages_add = []
   if default_res is None:
      default_res = {}
   messages = messages_add + [{"role": "user", "content": prompt}]
   position = 0
   res = default_res
   try:
      res = ''
      for response in model.chat(tokenizer, messages, stream=True):
         # Each streamed `response` carries the full text so far; keep only
         # the newly generated tail.
         res += response[position:]
         position = len(response)
         if torch.backends.mps.is_available():
            torch.mps.empty_cache()
      if decode_json and res:
         res = json.loads(res)
   except Exception:
      # The original bare `except:` also swallowed KeyboardInterrupt and
      # SystemExit; Exception is broad enough for model/JSON failures.
      res = default_res
   return res

def check_symptoms(question, symptoms):
    """Return True when any known symptom string occurs inside *question*."""
    return any(symptom in question for symptom in symptoms)

def intent_reg(history, question, symptoms={}, use_llm=True):
   """Classify the doctor's latest *question* into a recommendation intent.

   Args:
      history: prior dialogue, only used to build the LLM prompt.
      question: current doctor utterance.
      symptoms: known symptom strings for the keyword fallback.
      use_llm: True → ask the model; False → keyword rules.

   Returns:
      One of 症状推荐 / 药品推荐 / 过敏情况推荐 / 症状部位推荐 / 其它.
   """
   if use_llm:
       prompt = get_prompt_intention_tuijian(history, question)
       # decode_json=True so `res` is a parsed object; the original passed
       # False and then called .get() on the raw reply STRING, raising
       # AttributeError on every LLM-path call.
       res = chat(prompt, decode_json=True, default_res={})
       if not isinstance(res, dict):
           # Model answered with a non-object (list/str) — treat as unknown.
           return '其它'
       return res.get('intent_type', '其它')
   else:
       if '症状' in question or check_symptoms(question, symptoms):
           return '症状推荐'
       elif '药' in question:
           return '药品推荐'
       elif '过敏' in question:
           return '过敏情况推荐'
       elif '哪' in question:
           return '症状部位推荐'
       # TODO: dedicated body-part recommendation rule
       else:
           return '其它'

def handle_symptom_recommend_val(data):
    """Normalise a symptom-recommendation payload into a flat list of strings.

    Accepts a list of strings or a list of {'symptom': ...} dicts; anything
    else — including lists of unrecognised element types — yields [].

    Bug fixed: the original fell through and returned None (not a list) for
    list input whose first element was neither str nor dict.
    """
    if not data:
        return []
    if isinstance(data, (tuple, list)):
        if isinstance(data[0], str):
            return data
        if isinstance(data[0], dict):
            # Unwrap [{'symptom': 's1'}, ...] and re-normalise the result.
            return handle_symptom_recommend_val([v['symptom'] for v in data])
        # Unrecognised element type: return an empty list, not None.
        return []
    return []
def handle_symptom_recommend_res(data):
    """Decode the LLM symptom-recommendation reply into a list of symptoms.

    Unwraps a top-level 'possible_other_symptoms' key when present, then
    normalises via handle_symptom_recommend_val(). Returns [] whenever the
    reply is not valid JSON or has an unexpected shape.
    """
    res = []
    try:
        parsed = json.loads(data)
        if 'possible_other_symptoms' in parsed:
            parsed = parsed['possible_other_symptoms']
        res = handle_symptom_recommend_val(parsed)
    except Exception:
        # Malformed model output — fall back to "no recommendations".
        # (Narrowed from the original bare except.)
        pass
    return res

def symptom_recommend(symptoms, keshi, history, text, messages=[]):
    """Recommend follow-up symptoms using the LLM-only strategy.

    Only the last two history turns are sent to the model; *symptoms* and
    *keshi* are unused by this strategy but kept for interface stability.
    """
    llm_prompt = get_prompt_symptoms_tuijian_only_llms(history[-2:], text)
    raw_reply = chat(llm_prompt, messages, decode_json=False)
    return handle_symptom_recommend_res(raw_reply)

def get_bodypart_with_symptoms(bodypart_symptoms_mapping, bodypart_mapping, symptoms):
   """Map symptom names to coarse body parts, then refine the part names.

   A coarse part with no refinement entry passes through unchanged
   (the mapping lookup defaults to [part]).
   """
   coarse_parts = []
   for symptom in symptoms:
      coarse_parts.extend(bodypart_symptoms_mapping.get(symptom, []))

   refined = []
   for part in coarse_parts:
      refined.extend(bodypart_mapping.get(part, [part]))
   return refined

def get_body(symptoms_bodypart_mapping, symptoms, sim_threshold=0.87):
    """Return body parts for the most recent symptom that maps to any.

    Scans *symptoms* from newest to oldest. A direct hit in
    *symptoms_bodypart_mapping* is returned immediately; otherwise similar
    symptom names from the retrieval index (get_augument_content) are tried
    for similarity scores above *sim_threshold*.

    Bug fixed: the similarity fallback looked up the mapped parts but
    discarded the result, so the fallback could never return anything.
    """
    for sym in symptoms[::-1]:
        parts = symptoms_bodypart_mapping.get(sym, [])
        if parts:
            return parts
        # No direct hit — try retrieval-based near matches.
        for sim_sym, dis in get_augument_content(sym):
            if dis > sim_threshold:
                sim_parts = symptoms_bodypart_mapping.get(sim_sym, [])
                if sim_parts:
                    return sim_parts
    return []

def extract_symp(history, question, messages):
    """Extract the patient's current symptoms from the dialogue via the LLM.

    Expects the model reply to be JSON with a 'current_symptoms' key; a bare
    string value is wrapped into a one-element list.

    Returns:
        A list of symptom strings, [] when the reply is unusable.
    """
    syms = []
    prompt = get_prompt_medicine_tuijian_only_llms(history, question)
    res = chat(prompt, messages, decode_json=False, default_res={})
    try:
        parsed = json.loads(res)
        syms = parsed.get('current_symptoms', [])
        if isinstance(syms, str):
            syms = [syms]
    except Exception:
        # Reply was not JSON / not a dict — treat as "no symptoms found".
        # (Narrowed from the original bare except.)
        syms = []
    return syms

def bodypart_recommend(symptoms_bodypart_mapping, history, question, messages=[]):
    """Recommend body-part names for the current dialogue turn.

    Strategy: extract symptoms with the LLM and look them up in the local
    symptom→body-part mapping; when that yields nothing, fall back to pure
    LLM extraction (body-part entities from the question, plus a
    candidate-constrained selection prompt).
    """
    # Extract current symptoms from the dialogue via the LLM.
    symptoms = extract_symp(history, question, messages)
    # Map symptoms to body parts using the knowledge base.
    bodyparts = get_body(symptoms_bodypart_mapping, symptoms)
    if bodyparts:
        return bodyparts
    else:
        # Fallback 1: have the LLM extract body-part entities directly
        # from the question text.
        prompt_body_ext = f'从《》中的文本中，提取身体部位的实体, 并以 json 格式化输出。json 格式为：["身体部位1", "身体部位2", ...]。。文本：《{question}》'
        bodypart_ext = chat(prompt_body_ext, decode_json=False, default_res={})
        bodypart_ext = handle_array_res(bodypart_ext)
        # Fallback 2: extract symptoms from the whole dialogue...
        prompt_ext = get_prompt_extract_symptoms_with_history(history, question)
        symptoms_ext = chat(prompt_ext, decode_json=False, default_res={})
        symptoms_ext = handle_array_res(symptoms_ext)
        # TODO: filter the extracted symptoms against the knowledge base.
        # ...map them to candidate body parts and let the LLM pick 5.
        bodyparts = get_bodypart_with_symptoms(bodypart_symptoms_mapping, bodypart_mapping, symptoms_ext)
        prompt_bodypart = get_prompt_bodypart_tuijian(bodyparts, history, question)
        # Example result shape: ['胸部', '肋骨', '心脏', '肺部', '胸骨']
        res = chat(prompt_bodypart, decode_json=False, default_res=[])
        res = handle_array_res(res)
        res += bodypart_ext
        return res

def handle_medical_rec_res(res):
    """Extract the first drug list from the LLM medicine-recommendation reply.

    Looks for any key containing 'drug' (except 'question_drug') whose value
    is a list/tuple. Returns [] on parse failure or when no such key exists.

    Fixes vs. original: bare except narrowed, and the failure message typo
    ("medecal reg") corrected.
    """
    data = []
    try:
        parsed = json.loads(res)
        for key, value in parsed.items():
            if 'drug' in key and key != 'question_drug':
                if isinstance(value, (tuple, list)):
                    data = value
                    break
    except Exception:
        print(f'Handle medical record result failed! res: {res}')
    return data
def medicine_recommend(history, question, messages=[]):
   """Recommend drugs the patient may have used, via the LLM-only prompt."""
   llm_prompt = get_prompt_medicine_tuijian_only_llms(history, question)
   raw_reply = chat(llm_prompt, messages, decode_json=False, default_res={})
   return handle_medical_rec_res(raw_reply)

def pad_allergy(data):
    """Ensure every allergy string carries the '过敏' suffix."""
    return [v if '过敏' in v else v + '过敏' for v in data]
def handle_allergy_res(res):
    """Extract allergy strings from the LLM reply, with a random fallback.

    Looks for a list under any key containing 'allerg' (except
    'question_allergy'), drops '未知' entries, pads each result with the
    '过敏' suffix, and — when nothing usable remains — returns 3-4 randomly
    chosen generic allergy types.

    Fixes vs. original: bare except narrowed, and the copy-pasted failure
    message ("Handle medecal reg result failed!") replaced with an accurate
    one.
    """
    data = []
    try:
        parsed = json.loads(res)
        for key, value in parsed.items():
            if 'allerg' in key and key != 'question_allergy':
                if isinstance(value, (tuple, list)):
                    data = value
                    break
    except Exception:
        print(f'Handle allergy result failed! res: {res}')
    data = [v for v in data if v not in ['未知']]

    if not data:
        # Nothing usable — fall back to a shuffled generic sample of 3-4.
        data = ['食物过敏', '花粉过敏', '药物过敏', '接触性过敏']
        random.shuffle(data)
        data = data[:random.randint(3, 4)]

    return pad_allergy(data)
def allergy_recommend(history, question, messages=[]):
   """Recommend allergy candidates for the current dialogue turn."""
   llm_prompt = get_prompt_allergy_only_llms(history, question)
   raw_reply = chat(llm_prompt, messages, decode_json=False, default_res={})
   return handle_allergy_res(raw_reply)

def handle_dict_res(res):
    """Parse *res* into a dict, tolerating single-quoted JSON-ish replies.

    Tries strict json.loads first; on failure extracts the first non-nested
    {...} fragment from the text, swaps single quotes for double quotes and
    retries recursively. Returns {} when nothing parseable is found.

    (Narrowed the original bare except to ``except Exception``.)
    """
    try:
        data = json.loads(res)
    except Exception:
        match = re.search('{[^{}]+}', res)
        if match:
            # Retry on the extracted fragment with normalised quotes.
            data = handle_dict_res(match.group().replace("'", '"'))
        else:
            data = {}
    return data

def generate_medical_record(history, messages=[]):
   """Generate a structured medical-record dict from the dialogue history.

   When the freshly built prompt equals the previous call's, a trailing
   newline is appended — presumably to keep a repeated prompt from being
   served a cached reply (TODO confirm).
   """
   global cache_prompt
   prompt = get_generate_medical_record_prompt(history)
   if prompt == cache_prompt:
       prompt += '\n'
   cache_prompt = prompt

   raw_reply = chat(prompt, messages, decode_json=False, default_res={})
   return handle_dict_res(raw_reply)


def test_prompt(history, prompt_test='', messages=[]):
   """Run an ad-hoc prompt (or the medical-record prompt) through the model.

   When *prompt_test* is empty, the medical-record prompt for *history* is
   used instead, with the same duplicate-prompt perturbation as
   generate_medical_record(). Returns the raw model reply.
   """
   global cache_prompt
   if prompt_test:
       prompt = prompt_test
   else:
       prompt = get_generate_medical_record_prompt(history)
       if prompt == cache_prompt:
           prompt += '\n'
       cache_prompt = prompt

   return chat(prompt, messages, decode_json=False, default_res={})

def check_recommend_res(res):
    """Keep only recommendations containing at least one Chinese character."""
    has_chinese = re.compile('[\u4e00-\u9fa5]+')
    return [item for item in res if has_chinese.search(item)]
class RecommendHandler(tornado.web.RequestHandler):
    """POST endpoint recommending symptoms/drugs/allergies/body parts.

    Request JSON: username, text, keshi (department), history, messages,
    reverse_1/reverse_2 (reserved). Response JSON: 'status' plus up to five
    recommended strings under 'content'.
    """

    def get(self):
        # GET is not supported (sic: "Impliment" is the original response text).
        self.write('''No Impliment''')

    def post(self):
        ret = {'status': 200}
        try:

         global symptoms
         global symptoms_all
         global bodypart_symptoms_mapping
         global bodypart_mapping
         # global logger
         # Parse the JSON request body.
         args = self.request.body.decode('utf-8')
         logger.info(args)
         print(args)
         args = json.loads(args)
         logger.info(args)
         username = args.get('username', '')
         print(args)
         text = args.get('text', '')
         keshi = args.get('keshi', '')
         history = args.get('history', [])
         messages = args.get('messages', [{'role':'system', 'content':'你是一个医生助理， 每次回答以 json 格式返回数据。'}])
         reverse_1 = args.get('reverse_1', '')
         reverse_2 = args.get('reverse_2', '')
         # Cold start (empty history): recommend common department symptoms.
         if len(history) == 0:
             if keshi in keshi_mapping:
                 keshi = keshi_mapping[keshi]
             symptom = symptoms.get(keshi, [])
             # NOTE(review): shuffle mutates the shared `symptoms` list in place.
             random.shuffle(symptom)
             res = symptom[:5]

         else:
             # Later turns: classify intent with the keyword rules, then
             # dispatch to the matching recommender.
             intent = intent_reg(history, text, symptoms_all, use_llm=False)
             res = []
             if intent == '症状推荐':
                res = symptom_recommend(symptoms, keshi, history, text, messages)
             elif intent == '药品推荐':
                res = medicine_recommend(history, text, messages)
             elif intent == '过敏情况推荐':
                res = allergy_recommend(history, text, messages)
             elif intent == '症状部位推荐':
                res = bodypart_recommend(symptoms_bodypart_mapping, history, text, messages)
         # Drop entries with no Chinese characters and cap at five.
         res = check_recommend_res(res)
         ret['content'] = res[:5]
         ret['reserve_1'] = ''
         ret['reserve_2'] = ''
         ret['status'] = 200
        except Exception as e:
          # Any failure is reported as status=1 with no content.
          ret['status'] = 1
          print(traceback.format_exc())
        self.write(json.dumps(ret))


class IntentionHandler(tornado.web.RequestHandler):
    """POST endpoint exposing the rule-based intent classification directly.

    Request JSON mirrors RecommendHandler; response 'content' is the intent
    label string from intent_reg().
    """

    def get(self):
        # GET is not supported.
        self.write('''No impliment''')

    def post(self):
        ret = {'status': 200}
        try:

         global symptoms
         global symptoms_all
         global bodypart_symptoms_mapping
         global bodypart_mapping

         # Parse the JSON request body.
         args = self.request.body.decode('utf-8')
         logger.info(args)
         args = json.loads(args)
         logger.info(args)
         username = args.get('username', '')
         text = args.get('text', '')
         keshi = args.get('keshi', '')
         history = args.get('history', [])
         reverse_1 = args.get('reverse_1', '')
         reverse_2 = args.get('reverse_2', '')
         # Keyword rules only (use_llm=False): fast and deterministic.
         res = intent_reg(history, text, symptoms_all, use_llm=False)

         ret['content'] = res
         ret['reserve_1'] = ''
         ret['reserve_2'] = ''
         ret['status'] = 200
        except Exception as e:
          ret['status'] = 1
          print(traceback.format_exc())
        self.write(json.dumps(ret))


class GenerateMedicalRecordHandler(tornado.web.RequestHandler):
    """POST endpoint turning a dialogue into a structured medical record.

    Unlike generate_medical_record(), this delegates completion to the
    remote service at ``url_medical_record`` instead of the local model.
    """

    def get(self):
        # GET is not supported.
        self.write('''No impliment''')

    def handle_res(self, result):
        """Strip a Markdown ```json fence from a string result, if present."""
        if isinstance(result, str):
            if '```json' in result:
                result = result.replace('```json', '')
                result = result.replace('```', '')
            return result
        else:
            return result

    def get_medical_record(self, text, history, messages=[]):
        """Call the remote record service and return the cleaned record text.

        Returns '' on failure, or an explanatory dict when the reply does
        not look like a record (missing '主诉').
        """
        res = ''
        try:
            global cache_prompt_generate_record
            if text:
                history = history + [text]
            prompt_his = get_all_history_prompt(history)
            prompt = get_medical_prompt(prompt_his)

            # Perturb a repeated identical prompt with '\n' — presumably to
            # keep the backend from serving a cached reply (confirm).
            if prompt == cache_prompt_generate_record:
                prompt += '\n'
            cache_prompt_generate_record = prompt

            # Ask the remote service to generate the record.
            data = {'text': prompt, 'history': [], 'messages': messages}
            res = requests.post(url_medical_record, data=json.dumps(data)).text
            res = json.loads(res)
            if '主诉' not in res['response']:
                res = {'text': '病历生成失败，请输入正确的医生患者对话内容!'}
            else:
                res = self.handle_res(res['response'])
        except:
            print(traceback.format_exc())
            res = ''
        return res

    def post(self):
        ret = {'status': 200}
        try:
            # Parse the JSON request body.
            args = self.request.body.decode('utf-8')
            logger.info(args)
            args = json.loads(args)
            logger.info(args)
            username = args.get('username', '')
            text = args.get('text', '')
            keshi = args.get('keshi', '')
            messages = args.get('messages', [])
            history = args.get('history', [])
            reverse_1 = args.get('reverse_1', '')
            reverse_2 = args.get('reverse_2', '')

            res = self.get_medical_record(text, history, messages)

            ret['content'] = res
            ret['reserve_1'] = ''
            ret['reserve_2'] = ''
            ret['status'] = 200
        except Exception as e:
            ret['status'] = 1
            print(traceback.format_exc())
        self.write(json.dumps(ret))


def get_medical_prompt(prompt_his):
    """Build the medical-record prompt sent to the remote record service.

    Bug fixed: the template ended with two copies of the line
    '医生和患者的对话内容为：' (copy-paste duplication); the dialogue is now
    introduced exactly once.
    """
    # v1 wording — kept because it performed well.
    prompt = '给定医生和患者的对话内容，请生成一份患者的信息报告，' \
             '包括{主诉，现病史，过敏史，个人史，婚育史，家族史，检查检验结果}。如果没有则写“无”,并用严格用json格式输出。' \
             "json格式为：{'主诉':''，'现病史':''，'过敏史':''，'个人史':''，'婚育史':''，'家族史':''，'检查检验结果':''}" + "\n" \
              "关键信息解释：" \
              "主诉通常是指患者在就医时所描述的他们的症状、不适或健康问题。这是患者在对医生进行初诊时提出的问题，是他们希望解决的主要健康问题。" \
              "；现病史指的是患者当前正在经历的疾病情况的详细描述, 为一句话。它包括症状的起始时间、症状的性质、症状的持续时间以及可能的伴随症状。" \
              "；过敏史指的是患者对特定物质或环境因素产生过敏反应的历史。这些过敏原可以包括食物、药物、花粉、宠物毛发等。" \
              "；个人史包括有关患者个人生活和健康的历史信息，通常包括个人的生活习惯和行为，如饮酒、吸烟、锻炼、饮食习惯等。在这个情景下，患者没有提到具体的个人史，因此标注为'无'。" \
              "；婚育史指的是有关患者与婚姻、生育和家庭计划相关的历史和信息。包括婚姻状态、婚姻历史、生育历史等。在这个情景下，患者没有提到具体的婚育史，因此标注为'无'。" \
              "；家族史指的是有关患者直系亲属和近亲属的健康历史信息，具体包括家庭成员的健康状况。在这个情景下，患者没有提到具体的家族史，因此标注为'无'。" \
              "；检查检验结果指的是患者的检查和检验结果，包括各种医学测试和检查所得到的数据，用于评估患者的生理状况、疾病诊断和治疗进展。在这个情景下，患者表示暂时没有进行过体检或其他相关的检查，因此标注为'暂时没有'。" \
              "\n" \
              '医生和患者的对话内容为：'

    prompt += prompt_his
    return prompt

class PromptTestRecordHandler(tornado.web.RequestHandler):
    """POST endpoint for ad-hoc prompt experiments against the local model.

    When the request supplies 'prompt' it is run verbatim via test_prompt();
    otherwise the medical-record prompt for 'history' is used.
    """

    def get(self):
        # GET is not supported.
        self.write('''No impliment''')




    def post(self):
        ret = {'status': 200}
        try:
            # Parse the JSON request body.
            args = self.request.body.decode('utf-8')
            logger.info(args)
            args = json.loads(args)
            logger.info(args)
            username = args.get('username', '')
            text = args.get('text', '')
            keshi = args.get('keshi', '')
            prompt = args.get('prompt', '')
            messages = args.get('messages', [])
            history = args.get('history', [])
            reverse_1 = args.get('reverse_1', '')
            reverse_2 = args.get('reverse_2', '')

            # Run the supplied (or derived) prompt through the local model.
            res = test_prompt(history, prompt, messages)

            ret['content'] = res
            ret['reserve_1'] = ''
            ret['reserve_2'] = ''
            ret['status'] = 200
        except Exception as e:
            ret['status'] = 1
            print(traceback.format_exc())
        self.write(json.dumps(ret))


class RetouchHandler(tornado.web.RequestHandler):
    """Endpoint that returns a random opening question for a department.

    POST expects a JSON body with a ``keshi`` (department) field.  A candidate
    question is drawn from ``first_problems_keshi['common']`` plus the
    department-specific list, and returned in the standard envelope
    ``{"status": 200|1, "content": ..., "reserve_1": "", "reserve_2": ""}``.
    """

    def get(self):
        # GET is intentionally unsupported; kept for parity with the other handlers.
        self.write('''No impliment''')

    def retouch(self, text, history, messages, use_llms=False):
        # Placeholder kept for interface compatibility; not implemented yet.
        pass

    def post(self):
        ret = {'status': 200}
        try:
            # NOTE: renamed from `args` — the original shadowed the module-level
            # argparse namespace also called `args`.
            raw = self.request.body.decode('utf-8')
            logger.info(raw)
            payload = json.loads(raw)
            logger.info(payload)
            keshi = payload.get('keshi', '')

            # Candidate pool: department-agnostic questions plus department-specific ones.
            # first_problems_keshi is a module-level dict loaded elsewhere in this file.
            probs = first_problems_keshi.get('common', []) + first_problems_keshi.get(keshi, [])
            # The original did shuffle()+probs[0], which raised IndexError on an
            # empty pool (surfacing as status 1).  random.choice is the idiomatic
            # single draw; an empty pool now yields an empty answer instead.
            res = random.choice(probs) if probs else ''

            ret['content'] = res
            ret['reserve_1'] = ''
            ret['reserve_2'] = ''
            ret['status'] = 200
        except Exception:
            # Any failure (malformed JSON, missing globals, ...) is reported as status 1.
            ret['status'] = 1
            logger.exception('RetouchHandler.post failed')
        self.write(json.dumps(ret))


def handle_cilianxiangres(res):
    """Normalise the model's word-association reply into a flat list of strings.

    Accepted shapes:
      * ``{"answers": [{"content": str}, ...]}`` -> list of the ``content`` values
        (elements without a ``content`` key are skipped; a non-dict element
        rejects the whole payload and yields ``[]``)
      * ``[str, ...]``  -> returned unchanged
      * ``[[...], ...]`` -> flattened one level and re-processed recursively

    Any other input yields ``[]``.  Unlike the original, every path now
    returns a list explicitly (the original could fall through and return
    ``None``, and crashed with IndexError on an empty list).
    """
    # Same logger object the module-level get_logger(__name__) configures.
    log = logging.getLogger(__name__)
    log.info("handle_cilianxiangres is ")
    log.info(res)
    # BUG FIX: the original also did logger.info(args), accidentally logging the
    # module-level argparse namespace on every call; that line is removed.

    # Guard with isinstance(dict): the original `"answers" in res` membership
    # test would also match a list containing the string "answers".
    if isinstance(res, dict) and "answers" in res:
        answers = res["answers"]
        if isinstance(answers, (list, tuple)):
            contents = []
            for item in answers:
                if not isinstance(item, dict):
                    # Mixed / unexpected element type: reject the whole payload.
                    return []
                if "content" in item:
                    contents.append(item["content"])
            return contents
        # "answers" present but not a sequence (original fell through to None).
        return []

    if isinstance(res, (tuple, list)):
        if not res:
            # Empty sequence: original raised IndexError on res[0].
            return []
        if isinstance(res[0], str):
            return res
        if isinstance(res[0], (tuple, list)):
            flattened = []
            for part in res:
                flattened.extend(part)
            return handle_cilianxiangres(flattened)
        # First element is neither str nor sequence (original returned None here).
        return []

    log.info('result formate error!')
    return []

def cilianxiang(text, history, keshi, messages=None):
    """Ask the LLM for 5 plausible patient replies to the doctor's last turn.

    Args:
        text: unused; kept for interface compatibility with callers.
        history: dialogue turns; only ``history[-1]`` (the doctor's latest
            message) is embedded in the prompt.
        keshi: department name interpolated into the prompt.
        messages: optional chat context forwarded to ``chat``.  BUG FIX: the
            original used a mutable default ``messages=[]``, which is shared
            across calls and may be mutated by ``chat``.

    Returns:
        A list of candidate reply strings, or ``[]`` on any failure.
    """
    if messages is None:
        messages = []
    try:
        prompt = f"针对科室为“{keshi}”，给你一个生成任务，根据医生咨询内容，生成患者可能的回答内容，并直接生成5个可能的结果，必须用格式化输出。医生咨询内容为：{history[-1]}"
        res = chat(prompt, messages)
        # The model tends to emit single-quoted pseudo-JSON; normalise quotes
        # before parsing.  (Fragile if answers themselves contain apostrophes —
        # failures fall into the except branch below.)
        res = res.replace("'", '"')
        res = json.loads(res)
        res = handle_cilianxiangres(res)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; log the failure instead of swallowing it silently.
        logger.exception('cilianxiang failed')
        res = []
    return res

class CiLianxianghHandler(tornado.web.RequestHandler):
    """Endpoint for "word association": suggest likely patient replies.

    POST expects a JSON body with ``keshi``, ``history`` and optionally
    ``messages`` (a system message enforcing the JSON answer format is used
    as the default).  The request is delegated to ``cilianxiang`` and the
    result returned in the standard envelope
    ``{"status": 200|1, "content": ..., "reserve_1": "", "reserve_2": ""}``.
    """

    def get(self):
        # GET is intentionally unsupported; kept for parity with the other handlers.
        self.write('''No impliment''')

    def retouch(self, text, history, messages, use_llms=False):
        # Placeholder kept for interface compatibility; not implemented yet.
        pass

    def post(self):
        ret = {'status': 200}
        try:
            # NOTE: renamed from `args` — the original shadowed the module-level
            # argparse namespace also called `args`.
            raw = self.request.body.decode('utf-8')
            logger.info(raw)
            payload = json.loads(raw)
            logger.info(payload)
            text = payload.get('text', '')
            keshi = payload.get('keshi', '')
            # Default system message pins the model to the expected JSON shape.
            # (The default list is built per-request here, so it is not shared.)
            messages = payload.get('messages', [{'role': 'system', 'content': '你是一个医生助理， 每次回答以 json  '
                                                                              "{'answers': [{'content': ''},"
                                                                              "{'content': ''},"
                                                                              "{'content': ''},"
                                                                              "{'content': ''},"
                                                                              "{'content': ''}]}"
                                                                              ' 格式返回数据。'}])
            history = payload.get('history', [])

            res = cilianxiang(text, history, keshi, messages)

            ret['content'] = res
            ret['reserve_1'] = ''
            ret['reserve_2'] = ''
            ret['status'] = 200
        except Exception:
            # Any failure (malformed JSON, model error, ...) is reported as status 1.
            ret['status'] = 1
            logger.exception('CiLianxianghHandler.post failed')
        self.write(json.dumps(ret))

if __name__ == '__main__':
    # URL routing table: path pattern -> handler class.
    routes = [
        (r'/recommend', RecommendHandler),
        (r'/intention', IntentionHandler),
        (r'/generate_medical_record', GenerateMedicalRecordHandler),
        (r'/prompt_test', PromptTestRecordHandler),
        (r'/retouch', RetouchHandler),
        (r'/cilianxiang', CiLianxianghHandler),
    ]

    application = Application(handlers=routes, settings={})

    # Bind the Tornado HTTP server to the port given on the command line
    # (--port, default 8019) and block on the IO loop until interrupted.
    server = tornado.httpserver.HTTPServer(application)
    server.listen(args.port)
    print(f'Start ' + '-' * 80)
    tornado.ioloop.IOLoop.instance().start()
