import os

from langchain.llms import OpenAI
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel, Field

"""
官网
https://python.langchain.com/v0.1/docs/modules/model_io/output_parsers/types/json/
"""
# 大模型
llm = OpenAI(
    api_key="sk-VowKQBUMIkSND8WScNJtDLqf3FyqWHQ43LMVUXH1m6GZaopA",
    base_url="https://ai.nengyongai.cn/v1"
)


def test1():
    """
    让大模型生成 json 结构
    """

    #
    joke_query = "告诉我一个笑话."
    #
    parser = JsonOutputParser()
    #
    prompt = PromptTemplate(
        template="回答用户的查询.\n {format_instructions} \n {query} \n",
        input_variables=["query"],
        partial_variables={"format_instructions": parser.get_format_instructions()},
    )

    #
    chain = prompt | llm | parser
    resp = chain.invoke({"query": joke_query})
    print(resp)


class Joke(BaseModel):
    setup: str = Field(description="设置笑话的问题")
    punchline: str = Field(description="解决笑话的答案")


def test2():
    """
    让大模型生成指定结构的数据
    """

    #
    joke_query = "告诉我一个笑话."
    #
    parser = JsonOutputParser(pydantic_object=Joke)
    #
    prompt = PromptTemplate(
        template="回答用户的查询.\n {format_instructions} \n {query} \n",
        input_variables=["query"],
        partial_variables={"format_instructions": parser.get_format_instructions()},
    )

    #
    chain = prompt | llm | parser
    resp = chain.invoke({"query": joke_query})
    print(resp)


def test3():
    """
    流式处理
    """

    #
    joke_query = "告诉我一个笑话."
    #
    parser = JsonOutputParser()
    #
    prompt = PromptTemplate(
        template="回答用户的查询.\n {format_instructions} \n {query} \n",
        input_variables=["query"],
        partial_variables={"format_instructions": parser.get_format_instructions()},
    )

    #
    chain = prompt | llm | parser
    for s in chain.stream({"query": joke_query}):
        print(s)
