
import os

from langchain_community.utilities import SerpAPIWrapper
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAI, GoogleGenerativeAIEmbeddings


# --- Environment configuration ----------------------------------------------
# SECURITY: these API keys are committed in source control and must be treated
# as leaked — rotate them and load secrets from the environment or a .env file
# instead of hard-coding. Using `setdefault` (rather than plain assignment)
# also lets an externally provided value take precedence over the fallbacks.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGCHAIN_PROJECT", "langchain_runnable[1.0.1]")
os.environ.setdefault("LANGCHAIN_API_KEY", "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")
os.environ.setdefault("GOOGLE_API_KEY", "AIzaSyBJoz7BvdFgWTBwzcu-0xWpJKfEJOR6vPM")
os.environ.setdefault("SERPAPI_API_KEY", "47afe0f70fefbe12e10919ee52248ac01d28652b763975bc84347a774805f3b6")

# Shared model clients used by every demo below.
chat = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0)  # chat-message interface
llm = GoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0)  # plain-text completion interface
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")


def simple_demo():
    """Build a greeting prompt, run it through the chat model, print the text."""
    greeting = PromptTemplate.from_template("Hello, {name}!")
    pipeline = greeting | chat | StrOutputParser()
    print(pipeline.invoke({"name": "World"}))


def simple_demo2():
    """Same pipeline as simple_demo, composed with explicit .pipe() calls."""
    template = PromptTemplate.from_template("Hello, {name}!")
    sequence = template.pipe(chat).pipe(StrOutputParser())
    greeting = sequence.invoke({"name": "World"})
    print(greeting)


def simple_demo3():
    """Generate a greeting with the LLM, then feed the text into a SerpAPI search."""
    searcher = SerpAPIWrapper()
    greeting = PromptTemplate.from_template("Hello, {name}!")
    # A plain callable (searcher.run) is coerced into a runnable when piped.
    pipeline = greeting | llm | StrOutputParser() | searcher.run
    print(pipeline.invoke({"name": "World"}))




"""
 https://python.langchain.com/v0.1/docs/expression_language/primitives/sequence/
 The output of each step becomes the input of the next step.
"""
def runnable_map_demo():
    """Compose two chains: one tells a joke, the second judges whether it is funny.

    RunnableParallel maps the joke chain's string output into the ``joke``
    variable of the analysis prompt, which is then sent to the chat model.
    """
    prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")

    chain = prompt | chat | StrOutputParser()

    analysis_prompt = ChatPromptTemplate.from_template("is this a funny joke? {joke}")

    # composed_chain = {"joke": chain} | analysis_prompt | chat | StrOutputParser()

    # BUG FIX: pipe the analysis prompt into the chat model, not back into
    # `chain` — `chain` begins with a prompt expecting {"topic": ...} and
    # cannot accept the ChatPromptValue produced by `analysis_prompt`.
    composed_chain = (
        RunnableParallel({"joke": chain})
        .pipe(analysis_prompt)
        .pipe(chat)
        .pipe(StrOutputParser())
    )

    result = composed_chain.invoke({"topic": "jokes"})
    print(result)


def search_runnable_demo():
    """RAG example: retrieve context from an in-memory Chroma store, answer from it."""
    store = Chroma.from_texts(
        ["harrison worked at kensho"], embedding=embeddings
    )
    retriever = store.as_retriever()

    qa_template = """Answer the question based only on the following context:
    {context}

    Question: {question}
    """
    qa_prompt = ChatPromptTemplate.from_template(qa_template)

    # Fan the raw question out to both the retriever (-> context) and the prompt.
    rag_chain = (
        RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
        .pipe(qa_prompt)
        .pipe(chat)
        .pipe(StrOutputParser())
    )

    print(rag_chain.invoke("where did harrison work?"))


def runnable_bind_demo():
    """Demonstrate .bind(): fix the `stop` kwarg so generation halts at "four"."""
    bounded_chat = chat.bind(stop=["four"])
    pipeline = bounded_chat | StrOutputParser()
    answer = pipeline.invoke("Repeat quoted words exactly: 'One two three four five.'")
    print(answer)


if __name__ == "__main__":
    # Script entry point: only the bind demo runs by default; swap in any of
    # the other demo functions above to try them.
    runnable_bind_demo()