from tools.Doc_split import DocSplit
from tools.Pgvector_op import Pgvector
from tools.Transformer_sentence import BegZh
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
import configparser

# RAG pipeline: embed the question, retrieve matching context from the
# pgvector store, then ask a local LLM to answer using that context.
query = "网络类的知识竞赛加分吗"
cf = configparser.ConfigParser()
cf.read("../../config/config.ini")
print("查询数据库名：" + cf.get("pg1", "table") + '\n')

# Connect to the pgvector-backed store using credentials from config.ini.
pg = Pgvector(
    cf.get("pg1", "host"),
    cf.get("pg1", "port"),
    cf.get("pg1", "database"),
    cf.get("pg1", "user"),
    cf.get("pg1", "password"),
    cf.get("pg1", "table"),
)

# Embed the query with the BGE Chinese sentence model and run a similarity
# search. Each row is presumably (id, text, ..., filename) — the script reads
# column 1 as the chunk text and column 3 as the source file name; confirm
# against Pgvector.select.
beg_zh = BegZh(cf.get("beg_model", "path"))
rows = pg.select(beg_zh.encode(query))
if not rows:
    # Fail fast with a clear message instead of an IndexError on rows[0][3].
    raise SystemExit("No reference documents retrieved; check the database.")
file_name = rows[0][3]
# join is O(n); += string concatenation in a loop is quadratic worst case.
docs = "\n".join(row[1] for row in rows) + "\n"

print("参考资料定位：\n" + docs)
print("资料文件名：" + file_name)

# Build a prompt -> LLM chain; requires a local Ollama server with the
# qwen2.5 model pulled.
llm = Ollama(model="qwen2.5")
prompt = ChatPromptTemplate.from_template("""根据提供的上下文回答以下问题:

<context>
{context}
</context>

Question: {input}""")

document_chain = prompt | llm


print(document_chain.invoke({
    "input": query,
    "context": docs
}))

