# -*- coding:UTF-8 -*-
import json
from fastapi import APIRouter,Body
from typing import Union,List,Optional
from utils import BaseResponse,create_tool_prompt,create_search_prompt,retriveal_docs_in_vecor,OPENBA_3b_MODEL_PATH,BAICHUAN_4b_MODEL_PATH
from open_llm.openba import OpenBA_3B_LLM
from open_llm.baichuan import BaiChuan_LLM

# Module-level singletons: both models are loaded once at import time and
# shared by every request handled by this router.
# BaiChuan acts as the "tool" model (decides whether a web search is needed);
# OpenBA-3B generates the final answer.
baichuan_4b_llm=BaiChuan_LLM(model_path=BAICHUAN_4b_MODEL_PATH)
openba_3b_llm=OpenBA_3B_LLM(model_path=OPENBA_3b_MODEL_PATH)
# Debug stubs: uncomment to run the router without loading model weights.
# baichuan_4b_llm=None
# openba_3b_llm=None

# Router exposing the OpenBA-3B endpoints under the /openba_3b prefix.
openba_app=APIRouter(tags=["OpenBA-3B接口"],prefix="/openba_3b")

@openba_app.post("/chat",description="聊天接口")
async def openba_3b_chat(query:str=Body(...,description="用户输入",examples=["公司可以同时变更注册资本和股东吗？"]),

                   history_len:int=Body(-1,description="获取历史对话消息的数量"),
                   history:Union[int,List]=Body([],
                                                description="历史对话信息",
                                                examples=[[
                                                             {"input": "我们来玩成语接龙，我先来，生龙活虎",
                                                              "output": "虎头虎脑"}
                                                        ]]
                                                ),
                   temperature:float=Body(0.95,description="LLM 采样温度",ge=0.0,le=2.0),
                   top_p:float=Body(0.7,description="最低匹配词的概率",ge=0.0,le=1.0),
                   max_tokens:Optional[int]=Body(512,description="限制LLM生成的tokens数量"))->BaseResponse:
      """Chat endpoint backed by OpenBA-3B with optional search augmentation.

      Flow: look up documents related to ``query`` and ask the BaiChuan tool
      model whether they (or a web search) should back the answer:
            usable documents: document + query are sent to OpenBA
            otherwise: search-engine results + query are sent to OpenBA
      Tool-output parse failures and search-plugin failures are logged and
      degrade gracefully (the answer is generated without a prefix).

      Returns a ``BaseResponse`` whose ``data`` is the model's reply string.
      """
      print("\n\n===============================================================")
      # OpenBA itself caps output at 512 tokens; clamp the request.  An
      # explicit JSON null is allowed by the Optional[int] annotation, so it
      # must be normalized here before the numeric comparison.
      if max_tokens is None or max_tokens > 500:
            max_tokens = 500

      # Ask the BaiChuan tool model whether a web search is required.
      tool_res = baichuan_4b_llm.tool_chat(prompt=create_tool_prompt(query=query),system_message=None)
      print("【工具模型输出信息：】",tool_res)
      is_need_search_tool=False
      try:
            json_data = json.loads(tool_res)
            if json_data["is_need_search_tool"]=='yes':
                  is_need_search_tool=True
      except Exception as e:
            # Unparseable tool output: default to searching, the safer
            # choice for answer quality.
            print("【工具信息解析错误：】",str(e))
            is_need_search_tool=True

      # Prompt prefix (search/document context) prepended to the query.
      prefix=""

      try:
            search_prompt=create_search_prompt(is_need_search_tool,query)
            if search_prompt is not None:
                  prefix,query=search_prompt
      except Exception as e:
            # Search plugin failure is non-fatal: answer without a prefix.
            print("【查询插件调用失败！】",str(e))

      response=openba_3b_llm.invoke(input=query,prefix=prefix,temperature=temperature,max_tokens=max_tokens,top_p=top_p,chat_historys=history)
      print("【回复内容：】",response)
      print("===============================================================")
      return BaseResponse(data=response)



@openba_app.post("/retriveal",description="检索聊天接口")
async def openba_3b_retriveal(
                   query:str=Body(...,description="用户输入",examples=["公司可以同时变更注册资本和股东吗？"]),
                   temperature:float=Body(0.95,description="LLM 采样温度",ge=0.0,le=2.0),
                   top_p:float=Body(0.7,description="最低匹配词的概率",ge=0.0,le=1.0),
                   max_tokens:Optional[int]=Body(512,description="限制LLM生成的tokens数量"))->BaseResponse:
      """Retrieval-augmented chat endpoint backed by OpenBA-3B.

      Looks up documents for ``query`` in the vector store; when a hit is
      found, the retrieved context becomes the prompt prefix (and may rewrite
      the query) before OpenBA-3B generates the answer.

      Returns a ``BaseResponse`` whose ``data`` contains the answer text and
      the filenames of the retrieved source documents (empty list on miss).
      """
      print("\n\n===============================================================")
      # OpenBA itself caps output at 512 tokens; clamp the request.  An
      # explicit JSON null is allowed by the Optional[int] annotation, so it
      # must be normalized here before the numeric comparison.
      if max_tokens is None or max_tokens > 500:
            max_tokens = 500
      prefix=""
      filenames=[]
      # Vector-store lookup; None means no relevant documents were found.
      item=retriveal_docs_in_vecor(query)
      if item is not None:
            prefix,query,filenames=item
      response=openba_3b_llm.invoke(input=query,prefix=prefix,temperature=temperature,max_tokens=max_tokens,top_p=top_p)
      print("【回复内容：】",response)
      print("===============================================================")
      return BaseResponse(data={"answer":response,"files":filenames})