
import os

from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
from langchain.retrievers import MultiQueryRetriever
from langchain_community.document_loaders import TextLoader, WebBaseLoader
from langchain_community.retrievers import WikipediaRetriever
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter

# --- Environment configuration -------------------------------------------
# SECURITY(review): these credentials are hard-coded and committed to source
# control. They must be rotated and supplied via the environment or a secrets
# manager; the literals are kept only so the demo keeps running as-is.
# `setdefault` lets an externally supplied value take precedence over the
# checked-in fallback instead of clobbering it.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGCHAIN_PROJECT", "langchain_retrievers[1.0.0]")
os.environ.setdefault("LANGCHAIN_API_KEY", "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")
os.environ.setdefault("GOOGLE_API_KEY", "AIzaSyBJoz7BvdFgWTBwzcu-0xWpJKfEJOR6vPM")
os.environ.setdefault("SERPAPI_API_KEY", "47afe0f70fefbe12e10919ee52248ac01d28652b763975bc84347a774805f3b6")

# Shared model clients used by all demos below.
# Gemini embedding model used to vectorize document chunks for Chroma.
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
# Gemini chat model; temperature 0.7 allows moderately varied answers.
llm = ChatGoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0.7)

def simple_demo(
    path: str = "F:/tmp/output_遥远的救世主.txt",
    query: str = "所谓的神话",
) -> None:
    """Basic retrieval demo: index a local text file and run one query.

    Args:
        path: Text file to load and index (defaults to the original
            hard-coded Windows path; pass your own to reuse the demo).
        query: Question used to retrieve relevant chunks.

    Side effects:
        Builds an in-memory Chroma index (re-embeds on every call) and
        prints the retrieved documents to stdout.
    """
    loader = TextLoader(path, encoding="utf8")
    documents = loader.load()
    # Fixed-size chunks with no overlap keep the demo simple.
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)

    db = Chroma.from_documents(texts, embeddings)
    retriever = db.as_retriever()
    docs = retriever.invoke(query)

    print(docs)



def multi_query(
    url: str = "https://lilianweng.github.io/posts/2023-06-23-agent/",
    question: str = "What are the approaches to Task Decomposition?",
) -> None:
    """MultiQueryRetriever demo: the LLM rewrites the question into several
    query variants, retrieves for each, and returns the deduplicated union.

    Args:
        url: Web page to download and index (defaults to the original post).
        question: Question to answer against the indexed page.

    Side effects:
        Fetches the page over the network, builds an in-memory Chroma index,
        and prints the retrieved documents to stdout.
    """
    loader = WebBaseLoader(url)
    data = loader.load()

    # Split the page into small overlap-free chunks for embedding.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
    splits = text_splitter.split_documents(data)

    # VectorDB (in-memory; re-embeds on every call).
    vectordb = Chroma.from_documents(documents=splits, embedding=embeddings)

    retriever_from_llm = MultiQueryRetriever.from_llm(
        retriever=vectordb.as_retriever(), llm=llm
    )
    unique_docs = retriever_from_llm.invoke(question)
    print(unique_docs)

def wikipedia_demo(question: str = '犹太人的历史') -> None:
    """Answer a question with a ConversationalRetrievalChain backed by
    Wikipedia search (no local index is built).

    Args:
        question: Question to ask; defaults to the original hard-coded query.

    Side effects:
        Queries Wikipedia and the LLM over the network; prints the chain's
        result dict to stdout.
    """
    retriever = WikipediaRetriever()
    qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever)
    # Fresh conversation: no prior chat history is carried in.
    result = qa.invoke({"question": question, "chat_history": []})
    print(result)


if __name__ == "__main__":
    # Entry point: only the multi-query demo runs by default; the other
    # demos (simple_demo, wikipedia_demo) are invoked manually.
    multi_query()