from langchain_ollama import ChatOllama
from langchain_ollama.llms import OllamaLLM
from langchain_core.prompts import ChatPromptTemplate

import os

from local_rag.chunk.file_processor import feedChroma
import chromadb
from chromadb import Settings

# Guard against re-execution: importing this module elsewhere in the project
# would otherwise run the whole script body at import time.
if __name__ == "__main__":

  file_name = "sample.pdf"

  # Resolve the sample file relative to this script so the cwd doesn't matter.
  sample_file = os.path.join(os.path.dirname(__file__), "example_files", file_name)

  # TODO: use file content md5 as the name of vector db.
  collection = feedChroma(name=file_name, file_path=sample_file)

  query_name = "PingCode"
  query_result = collection.query(query_texts=[query_name], n_results=15)

  # Chroma returns one list of document chunks per query text, so `docs`
  # is a list of lists of strings.
  docs = query_result["documents"]

  if docs:
    print(f"docs is: {str(docs)}")

    # BUG FIX: the original did `context_str = context_str.join(doc)`, which
    # uses the previously accumulated text as the *separator* and discards it
    # on every iteration. Flatten all per-query chunk lists and join once.
    context_str: str = "\n\n".join(
        chunk for per_query_chunks in docs for chunk in per_query_chunks
    )

    print(f"\n\ngot context_str is : {context_str}")

    template = """
    你是一个易中天，请认真阅读一下小说.
    \n\n{context}
    \n\n请根据你的阅读理解回答问题: {question}:
    """

    prompt = ChatPromptTemplate.from_template(template)

    # Lower temperature => more deterministic output; higher => more diverse.
    # BUG FIX: temperature must be numeric — the original passed the string "0".
    model = ChatOllama(model="qwen2.5:0.5b", temperature=0, num_predict=256)

    chain = prompt | model

    result = chain.invoke({"question": f"{query_name}的特点是什么?", "context": context_str})

    print(f"\n\nresult is: {str(result.content)}")

    print("task end")