# LangChain的函数，工具和代理(四)：使用 OpenAI 函数进行标记(Tagging) & 提取(Extraction)

import os
from typing import Optional, List

import openai
import json
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.schema.runnable import RunnableMap
from langchain.llms import OpenAI
from langchain_community.output_parsers.ernie_functions import JsonKeyOutputFunctionsParser
from pydantic import BaseModel, Field
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema.runnable import RunnableLambda

# SECURITY: never hard-code API keys in source control — the literal key that
# used to live on these lines is exposed and should be revoked. Read the key
# from the environment instead; LangChain's ChatOpenAI also reads
# OPENAI_API_KEY from the environment on its own.
openai.api_key = os.getenv("OPENAI_API_KEY", "")


# Tagging
def chain_tagging():
    """Tag a short text with sentiment and language via a forced OpenAI function call."""

    # Pydantic schema for the tagging result; the class docstring and the
    # Field descriptions are turned into the OpenAI function specification,
    # so they must not be omitted.
    class Tagging(BaseModel):
        """Tag the piece of text with particular info."""
        sentiment: str = Field(description="sentiment of text, should be `pos`, `neg`, or `neutral`")
        language: str = Field(description="language of text (should be ISO 639-1 code)")

    # Convert the pydantic model into an OpenAI function description.
    functions = [convert_pydantic_to_openai_function(Tagging)]

    # Prompt template: system instruction plus the user-supplied text.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "Think carefully, and then tag the text as instructed"),
        ("user", "{input}")
    ])

    # Default gpt-3.5-turbo model; binding function_call by name forces the
    # model to invoke the "Tagging" function on every request.
    model_with_functions = ChatOpenAI(temperature=0).bind(
        functions=functions,
        function_call={"name": "Tagging"},
    )

    # Chain: prompt -> LLM with bound function -> JSON output parser.
    tagging_chain = prompt | model_with_functions | JsonOutputFunctionsParser()
    print(tagging_chain.invoke({"input": "我爱上海"}))


# Extraction
def chain_extraction():
    """Extract structured info about people mentioned in a text via a forced OpenAI function call."""
    # FIX: the module-level JsonKeyOutputFunctionsParser (L15) is imported from
    # langchain_community.output_parsers.ernie_functions — the parser for
    # Baidu Ernie models. This chain uses an OpenAI chat model, so shadow it
    # with the OpenAI-specific parser here.
    from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser

    # Pydantic schemas; docstrings and Field descriptions become part of the
    # generated OpenAI function spec and must not be omitted.
    class Person(BaseModel):
        """Information about a person."""
        name: str = Field(description="person's name")
        age: Optional[int] = Field(description="person's age")

    class Information(BaseModel):
        """Information to extract."""
        people: List[Person] = Field(description="List of info about people")

    # Convert the top-level model into an OpenAI function description.
    functions = [convert_pydantic_to_openai_function(Information)]

    # Prompt: instruct the model not to guess and to allow partial info.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "Extract the relevant information, if not explicitly provided do not guess. Extract partial info"),
        ("human", "{input}")
    ])

    # Default gpt-3.5-turbo; forcing function_call makes the model always
    # invoke the "Information" function.
    extraction_model = ChatOpenAI(temperature=0).bind(
        functions=functions, function_call={"name": "Information"}
    )
    # key_name="people" pulls just the list of people out of the function
    # call arguments instead of the whole JSON object.
    extraction_chain = prompt | extraction_model | JsonKeyOutputFunctionsParser(key_name="people")
    response = extraction_chain.invoke({"input": "小明今年15岁，他的妈妈是张丽丽今年40岁，他的爸爸是张三"})
    print(response)


# Real-world application: Tagging + Extraction.
# Requirement: summarize a tech article from a website, identify its language,
# and extract keywords; then extract title/author info mentioned in it.
def chain_news():
    """Tag (summary/language/keywords) and extract (title/author) info from a web article."""
    # FIX: the module-level JsonKeyOutputFunctionsParser (L15) comes from the
    # Ernie-model parsers module; this chain uses an OpenAI chat model, so
    # shadow it with the OpenAI-specific parser.
    from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser

    # 1. Load the web page and truncate to the first 3000 characters to stay
    # within the model's context limit.
    loader = WebBaseLoader("https://tech.ifeng.com/c/8VEctgVlwbk")
    documents = loader.load()
    doc = documents[0]
    page_content = doc.page_content[:3000]

    # 2. Tagging: summary, language and keywords of the article. The class
    # docstring and Field descriptions feed the OpenAI function spec.
    class Overview(BaseModel):
        """Overview of a section of text."""
        summary: str = Field(description="Provide a concise summary of the content.")
        language: str = Field(description="Provide the language that the content is written in.")
        keywords: str = Field(description="Provide keywords related to the content.")

    tagging_functions = [convert_pydantic_to_openai_function(Overview)]
    tagging_prompt = ChatPromptTemplate.from_messages([
        ("system", "Extract the relevant information, if not explicitly provided do not guess. Extract partial info"),
        ("human", "{input}")
    ])
    # Default gpt-3.5-turbo; forcing function_call guarantees the model
    # invokes the "Overview" function.
    tagging_model = ChatOpenAI(temperature=0).bind(
        functions=tagging_functions, function_call={"name": "Overview"}
    )
    tagging_chain = tagging_prompt | tagging_model | JsonOutputFunctionsParser()
    tagging_response = tagging_chain.invoke({"input": page_content})
    print(tagging_response)

    # 3. Extraction: titles and authors mentioned in the article.
    class News(BaseModel):
        """Information about news title and author."""
        title: str = Field(description="The article title is briefly extracted.")
        author: Optional[str] = Field(description="Extract the author's name.")

    class Info(BaseModel):
        """Information to extract"""
        news: List[News] = Field(description="List of info about News")

    extraction_functions = [convert_pydantic_to_openai_function(Info)]
    extraction_prompt = ChatPromptTemplate.from_messages([
        ("system", "Extract the relevant information, if not explicitly provided do not guess. Extract partial info"),
        ("human", "{input}")
    ])
    # Force the "Info" function call; key_name="news" extracts only the list
    # of news items from the function-call arguments.
    extraction_model = ChatOpenAI(temperature=0).bind(
        functions=extraction_functions, function_call={"name": "Info"}
    )
    extraction_chain = extraction_prompt | extraction_model | JsonKeyOutputFunctionsParser(key_name="news")
    extraction_response = extraction_chain.invoke({"input": page_content})
    print(extraction_response)

# Extract paper information.
# Note: unlike the previous examples, we do not extract the title/author of
# the article itself, but the titles/authors of OTHER papers it mentions.
def chain_paper():
    """Extract titles/authors of papers cited by a long article, with and without text splitting."""
    # FIX: the module-level JsonKeyOutputFunctionsParser (L15) comes from the
    # Ernie-model parsers module; this chain uses an OpenAI chat model, so
    # shadow it with the OpenAI-specific parser.
    from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser

    # Approach 1: single call on a truncated prefix of the article.
    loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
    documents = loader.load()
    doc = documents[0]
    # Truncate to fit the model's context window (see Approach 2 for the
    # splitter-based workaround).
    page_content = doc.page_content[:10000]

    # Pydantic schemas; the docstrings become the function descriptions.
    class Paper(BaseModel):
        """Information about papers mentioned."""
        title: str
        author: Optional[str]

    class Info(BaseModel):
        """Information to extract"""
        papers: List[Paper]

    paper_extraction_function = [convert_pydantic_to_openai_function(Info)]

    template = """A article will be passed to you. Extract from it all papers that are mentioned by this article. 
    Do not extract the name of the article itself. If no papers are mentioned that's fine - you don't need to extract any! Just return an empty list.
    Do not make up or guess ANY extra information. Only extract what exactly is in the text."""
    prompt = ChatPromptTemplate.from_messages([
        ("system", template),
        ("human", "{input}")
    ])
    # Default gpt-3.5-turbo; forcing function_call makes the model always
    # invoke the "Info" function; key_name="papers" extracts just the list.
    model = ChatOpenAI(temperature=0).bind(
        functions=paper_extraction_function, function_call={"name": "Info"}
    )
    extraction_chain = prompt | model | JsonKeyOutputFunctionsParser(key_name="papers")
    response1 = extraction_chain.invoke({"input": page_content})
    print(response1)

    # Approach 2: the truncation above loses information. Split the full text
    # into chunks, map the extraction chain over each chunk, then flatten the
    # per-chunk result lists into one list.
    text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=0)
    prep = RunnableLambda(
        lambda x: [{"input": chunk} for chunk in text_splitter.split_text(x)]
    )

    def flatten(matrix):
        """Flatten a list of lists into a single list (one level deep)."""
        flat_list = []
        for row in matrix:
            flat_list += row
        return flat_list

    # prep yields one {"input": ...} dict per chunk; .map() runs the
    # extraction chain once per chunk; flatten merges the per-chunk lists so
    # the output is a single flat list of papers.
    chain = prep | extraction_chain.map() | flatten
    response2 = chain.invoke(doc.page_content)
    print(response2)

if __name__ == '__main__':
    # Each demo calls the OpenAI API (network + billed tokens); uncomment the
    # one you want to run.
    # chain_tagging()
    # chain_extraction()
    # chain_news()
    chain_paper()