# LangChain的函数，工具和代理(二)：LangChain的表达式语言(LCEL)

import os
import openai
import json
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.schema.runnable import RunnableMap
from langchain.llms import OpenAI

# Read the OpenAI API key from the environment instead of hardcoding it.
# SECURITY: the original file embedded a literal "sk-..." secret here.
# Never commit API keys to source control — rotate any key that was exposed.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
# Chat model used by every chain below; defaults to gpt-3.5-turbo.
model = ChatOpenAI()
# Parses the model's chat message into a plain string.
output_parser = StrOutputParser()


# 1. Simple chain
def chain_simple():
    """Build prompt | model | parser and demonstrate invoke/batch/stream.

    Side effects: makes OpenAI API calls and prints the results.
    """
    # Build a prompt from a template with a single {topic} placeholder.
    prompt = ChatPromptTemplate.from_template(
        "请为我写一首关于 {topic}的诗。"
    )
    # Compose the chain with the LCEL pipe operator.
    # chain internals:
    #   first  -> the prompt template
    #   middle -> the LLM
    #   last   -> the output parser
    chain = prompt | model | output_parser
    # invoke: run the chain on a single input.
    response1 = chain.invoke({"topic": "大海"})
    print(response1)
    # batch: run the chain on a list of inputs.
    response2 = chain.batch([{"topic": "土地"}, {"topic": "天空"}])
    print(response2)
    # stream: returns a generator of output chunks.
    # BUG FIX: the original printed the generator object itself; the streamed
    # text only appears if we iterate the chunks.
    for chunk in chain.stream({"topic": "桃花"}):
        print(chunk, end="", flush=True)
    print()


# 2. More complex chain (retrieval-augmented generation)
def chain_complex():
    """Answer a question using only documents fetched from a vector store.

    Side effects: embeds texts and calls the OpenAI API, prints the answer.
    """
    # 2.1 In-memory vector store seeded with two sample facts.
    store = DocArrayInMemorySearch.from_texts(
        ["人是由猩猩进化而来", "熊猫喜欢吃竹子"],
        embedding=OpenAIEmbeddings()
    )
    # 2.2 Wrap the store as a retriever.
    doc_retriever = store.as_retriever()
    # 2.3 / 2.4 Prompt that restricts the model to the retrieved context.
    rag_prompt = ChatPromptTemplate.from_template(
        """Answer the question based only on the following context:
    {context}
    Question: {question}
    """
    )
    # 2.5 Map the raw input into the two values the prompt needs:
    # retrieved documents plus the original question.
    prepare_inputs = RunnableMap({
        "context": lambda inp: doc_retriever.get_relevant_documents(inp["question"]),
        "question": lambda inp: inp["question"]
    })
    # 2.6 / 2.7 Compose and run the full pipeline.
    rag_chain = prepare_inputs | rag_prompt | model | output_parser
    answer = rag_chain.invoke({"question": "人从哪里来？"})
    print(answer)


# 3. Bind — implement OpenAI function calling via LangChain
# 3.1 Function description objects (OpenAI function-calling JSON schema).
# Each entry describes one callable the model may choose to invoke.
functions = [
    {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                # Temperature unit; restricted to the two allowed values.
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"]},
            },
            # Only "location" is mandatory; "unit" is optional.
            "required": ["location"],
        },
    }
]


# 3.2 The function the model's call description refers to
def get_current_weather(location, unit="fahrenheit"):
    """Return a canned weather report for *location* as a JSON string."""
    report = dict(
        location=location,            # requested city
        temperature="72",             # fixed demo temperature
        unit=unit,                    # temperature unit
        forecast=["sunny", "windy"],  # fixed demo conditions
    )
    return json.dumps(report)

# 3. A chain that carries function descriptions
def chain_func():
    """Bind the function schemas to the model and print the call arguments.

    Side effects: calls the OpenAI API and prints the JSON argument string
    the model produced for the function call.
    """
    # 3.3 / 3.4 Single-turn human prompt.
    prompt = ChatPromptTemplate.from_messages([("human", "{input}")])
    # 3.5 / 3.6 .bind() attaches the external function descriptions so they
    # are sent along with every request made through this model.
    llm_with_functions = ChatOpenAI(temperature=0).bind(functions=functions)
    # 3.7 / 3.8 Compose and run.
    pipeline = prompt | llm_with_functions
    reply = pipeline.invoke({"input": "上海的天气怎么样？"})
    # 3.9 The model answers with a function_call; extract its arguments.
    print(reply.additional_kwargs['function_call']['arguments'])


# 4. Fallbacks
def chain_fallbacks():
    """Demonstrate .with_fallbacks(): if the primary chain fails, run a backup.

    The legacy completion model often emits text that json.loads rejects,
    triggering the fallback chat-model chain.
    Side effects: calls the OpenAI API and prints the parsed JSON result.
    """
    # Ask for three poems as a JSON blob: title, author, first line each.
    challenge = "write three poems in a json blob, where each poem is a json blob of a title, author, and first line"
    # Primary: a legacy completion model.
    # NOTE(review): "text-davinci-001" has been retired by OpenAI — confirm
    # this model name is still served before relying on this demo.
    simple_model = OpenAI(
        temperature=0,
        max_tokens=1000,
        model="text-davinci-001"
    )
    # Primary chain: completion text -> string -> parsed JSON.
    simple_chain = simple_model | output_parser | json.loads
    # Backup chain built on the chat model.
    # BUG FIX: the original also called this chain directly and discarded the
    # result — a wasted paid API call; that stray invoke is removed.
    backup_chain = ChatOpenAI(temperature=0) | output_parser | json.loads
    # Final chain: try the primary, fall back to the backup on error.
    final_chain = simple_chain.with_fallbacks([backup_chain])
    response = final_chain.invoke(challenge)
    print(response)



if __name__ == '__main__':
    # Run the simple-chain demo by default; uncomment the others to try them
    # (each makes live OpenAI API calls).
    chain_simple()
    # chain_complex()
    # chain_func()
    # chain_fallbacks()