from langchain.chains import RetrievalQA
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks.manager import CallbackManager
from langchain_community.llms import Ollama
from langchain_community.embeddings.ollama import OllamaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.document_loaders import TextLoader
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
import os
import time

# Read the Ollama model name from the first line of the local config file.
# The same name is reused below for both the LLM and the embedding model.
with open("modelname.cfg", "r", encoding="utf-8") as f:
    modelname = f.readline().strip()
print("using llm model", modelname)  # fixed typo: "modle" -> "model"

# Streaming LLM client against a local Ollama server: generated tokens are
# echoed to stdout as they arrive via the streaming callback handler.
stream_callbacks = CallbackManager([StreamingStdOutCallbackHandler()])
llm = Ollama(
    base_url="http://localhost:11434",
    model=modelname,
    verbose=False,
    callback_manager=stream_callbacks,
)


# Load the persisted Chroma vector store from the 'jj' directory, embedding
# queries with the same Ollama model used for generation.
vectorstore = Chroma(persist_directory='jj', embedding_function=OllamaEmbeddings(model=modelname))
print("embedding chroma db load done")  # fixed typos: "embeddong ... lead"
# Dump the raw store contents (ids, documents, metadatas) for inspection.
print("vectorstore get documents is", vectorstore.get())

print("try retriever with meta data")  # fixed typo: "retriver"
# Natural-language description of the corpus, used by the self-query retriever
# ("records of course content studied at Betagou AI education").
document_content_description = "学生在倍塔狗人工智能教育学习的课程内容记录"

# Metadata schema handed to the self-query retriever so the LLM can translate
# questions into structured filters over these stored document fields.
# NOTE(review): the field *names* must match the metadata keys stored in the
# Chroma collection exactly, so the misspelled "quater" key is kept as-is —
# only the human-readable descriptions (which guide the LLM) are corrected.
metadata_field_info = [
    AttributeInfo(
        name="year",
        description="year of the study date",
        type="integer",
    ),
    AttributeInfo(
        name="month",
        description="month of the study date",
        type="integer",
    ),
    AttributeInfo(
        name="day",
        description="day of the study date",
        type="integer",
    ),
    AttributeInfo(
        name="date",
        description="date of study",
        type="string",
    ),
    AttributeInfo(
        name="quater",  # key intentionally misspelled to match stored metadata
        description="quarter of the study date",  # fixed typo: "quator"
        type="integer",
    ),
    AttributeInfo(
        name="student",
        description="name of student",
        type="string",
    ),
    AttributeInfo(
        name="school",
        description="name of site of school",
        type="string",
    ),
    AttributeInfo(
        name="duration",
        description="duration of study in hours",
        type="float",
    ),
]

# Build a self-querying retriever: the LLM first translates a natural-language
# question into a structured query (semantic search text + metadata filters
# over the fields declared above), which is then run against the vector store.
retriever = SelfQueryRetriever.from_llm(
    llm,
    vectorstore,
    document_content_description,
    metadata_field_info,
    enable_limit=False,  # do not let the LLM inject a result-count limit
    verbose=False
)

# Counter used by retrieverinvoketest() to number its printed results.
N = 0

# Sample questions (in Chinese) about one student's study history,
# e.g. "summarize what Chen Zhiyuan studied at Betagou in 2023".
questions = [
    "总结一下2023年陈知远在倍塔狗学习了什么",
    "陈知远2023年1季度在倍塔狗学习了什么",
]

def retrieverinvoketest(question):
    """Run the retriever's query constructor on *question* and print the
    structured query it produces, prefixed with a running counter.

    Increments the module-level counter ``N`` for numbered output.
    """
    global N
    N += 1
    try:
        # Bug fix: the original overwrote *question* with a hard-coded string
        # here, so the argument passed by the caller was silently ignored.
        result = retriever.query_constructor.invoke(question)
        print(N, modelname + "分析你的问题：", question, result)
    except Exception as e:
        # Narrowed from a bare `except:` that hid every error (including
        # KeyboardInterrupt) with no detail; now the cause is printed too.
        print("AI have exception", e)

# Exercise the LLM query constructor on every sample question.
for question in questions:
    retrieverinvoketest(question)

print("get get_relevant_documents  from retriever")
# NOTE(review): this relies on the for-loop variable leaking out of the loop —
# `question` is still bound to the LAST entry of `questions` here.
docs=retriever.get_relevant_documents(question)
print(len(docs),"number of docs get for",question)
for doc in docs:
    print(doc)

print("using vectorstore as retriever")  # fixed typo: "retiever"
# For comparison, rebind `retriever` to a plain similarity-search retriever
# (no LLM query construction) and re-run the last question from the loop above.
retriever = vectorstore.as_retriever()

docs = retriever.get_relevant_documents(question)
for doc in docs:
    print(doc)
print(len(docs), "number of docs get for", question)
print("bye bye")

# NOTE(review): everything below is broken as written. `FilteredRetriever`,
# `store`, and `source_filter` are not defined anywhere in this file, and
# `ConversationalRetrievalChain` is never imported, so the first line raises
# NameError at runtime. This looks like a fragment pasted from another module
# — either define/import these names or remove this block.
filtered_retriever = FilteredRetriever(vectorstore=store.as_retriever(), filter_prefix=source_filter)

chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=filtered_retriever,
    verbose=True,
    return_source_documents=True,
)