from typing import Optional

from langchain.output_parsers import PydanticOutputParser
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel, Field
from pymilvus import connections

from llmConfig import llm

# Schema for the structured answer the LLM must produce.
# NOTE: the Field(description=...) strings are runtime data — they are embedded
# into the JSON-schema format instructions sent to the model, so they are kept
# verbatim (Chinese) to preserve the prompt's behavior.
class StructureDescription(BaseModel):
    acupoint: str = Field(description="穴位信息")       # acupoint information
    symptom: str = Field(description="对应症状")        # corresponding symptom
    disease: str = Field(description="疾病名称")        # disease name
    description: str = Field(description="大模型整合回答")  # LLM's consolidated answer
# Parser that coerces the LLM's raw text output into a StructureDescription instance.
output_parser = PydanticOutputParser(pydantic_object=StructureDescription)
# JSON-schema instructions injected into the prompt so the model knows the required output shape.
format_instructions = output_parser.get_format_instructions()

# Prompt template for turning a user question plus extracted keywords
# (acupoint / disease / symptom) into a knowledge-graph query.
# NOTE(review): the <question> section interpolates the keywords rather than the
# question itself, and {context} appears twice — looks intentional for this
# pipeline, but worth confirming against the KG query consumer.
PROMPT_TEMPLATE = """
System: You are an AI assistant, and provides answers to questions by using fact based and statistical information when possible.
Please generate query statements for a knowledge graph based on questions.
The question is: “{context}”
The keywords are as follows:
- Acupoint information: {acuinfo}
- Disease: {dise}
- Symptom: {symp}
If the user doesn't mention about the thing, just return null string, don't try to make up an answer.
For example, if user sends:"我感觉腹部疼痛。" You should return:
Acupoint information:"腹部",
Disease:"腹痛",
Symptom:"腹痛"

<context>
{context}
</context>

<question>
{acuinfo}
{dise}
{symp}
</question>

The response should be specific and use statistics or numbers when possible:
{format_instructions}

Assistant:"""
# input_variables: the user question (context) plus the three extracted keywords;
# format_instructions is pre-bound so callers never pass it explicitly.
prompt = PromptTemplate(template=PROMPT_TEMPLATE, input_variables=["context", "acuinfo", "dise", "symp"],partial_variables={"format_instructions": format_instructions})


def queryTool(question: str, acuinfo: str, dise: str, symp: str,
              host: str = "localhost", port: str = "19530") -> Optional[str]:
    """Format the KG-query prompt for *question* and invoke the LLM.

    Args:
        question: The user's natural-language question (fills ``{context}``).
        acuinfo: Extracted acupoint keyword (may be an empty string).
        dise: Extracted disease keyword (may be an empty string).
        symp: Extracted symptom keyword (may be an empty string).
        host: Milvus server host (new, defaults to the previous hard-coded value).
        port: Milvus server port (new, defaults to the previous hard-coded value).

    Returns:
        The LLM response on success, or ``None`` if prompt formatting or the
        LLM call raised.  NOTE(review): ``llm.invoke`` may return a message
        object rather than ``str`` depending on ``llmConfig`` — confirm.
    """
    # NOTE(review): the Milvus connection is opened here but not referenced in
    # this function body — presumably `llm` or downstream tooling relies on the
    # "default" alias; confirm before removing.
    connections.connect("default", host=host, port=port)
    try:
        formatted_prompt = prompt.format(context=question, acuinfo=acuinfo, dise=dise, symp=symp)
        response = llm.invoke(formatted_prompt)
        return response
    except Exception as e:
        # Best-effort: surface the failure but keep the caller-visible
        # "None on error" contract of the original implementation.
        print(f"Error generating query: {e}")
        return None
    finally:
        # Fix: the original leaked the connection on every call (it was never
        # disconnected, including on the exception path).
        connections.disconnect("default")