import os

from langchain.output_parsers import StructuredOutputParser
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import PromptTemplate

from quick_learn.output_parsers_learn.get_info_response import response_schemas


# Build the chat-model client.
def get_openai_client():
    """Return a ChatOpenAI client pointed at the ChatAnywhere proxy endpoint.

    The API key comes from the OPENAI_API_KEY environment variable; if it is
    unset this passes None and the client may still pick up the env var
    itself at call time — NOTE(review): confirm desired failure mode.
    """
    api_key = os.environ.get('OPENAI_API_KEY')
    client = ChatOpenAI(
        temperature=0.5,
        model_name='gpt-3.5-turbo',
        base_url='https://api.chatanywhere.tech/v1',
        openai_api_key=api_key,
    )
    return client

# Build the prompt object.
def get_prompt(output_parser):
    """Return a PromptTemplate that embeds *output_parser*'s format
    instructions so the model answers in the required structured format.

    The template exposes one input variable, "question"; the
    "format_instructions" slot is pre-filled as a partial variable.
    """
    instructions = output_parser.get_format_instructions()
    template = "answer the users question as best as possible.\n{format_instructions}\n{question}"
    return PromptTemplate(
        template=template,
        input_variables=["question"],
        partial_variables={"format_instructions": instructions},
    )

# Build the output-parser object.
def get_parse():
    """Return a StructuredOutputParser built from the module-level
    response_schemas imported at the top of the file."""
    return StructuredOutputParser.from_response_schemas(response_schemas)

def doChain():
    """Run the prompt -> llm -> parser chain on a sample question.

    Builds the parser, model client, and prompt via the sibling helpers,
    composes them with the LCEL pipe operator, invokes the chain, prints
    the parsed result, and returns it.

    Returns:
        The structured result produced by the output parser (previously
        this function only printed it and implicitly returned None, which
        made the value unusable by callers).
    """
    output_parser = get_parse()
    llm = get_openai_client()
    prompt = get_prompt(output_parser)

    # LCEL pipeline: fill the prompt, call the model, parse the reply.
    chain = prompt | llm | output_parser
    res = chain.invoke({"question": "what's the capital of france?"})
    print(res)
    return res
