import os

from langchain_community.vectorstores import FAISS
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import ChatOpenAI, OpenAI
#
# llm=OpenAI(
#     openai_api_key="key",
#     openai_api_base="https://api.moonshot.cn/v1",
#     model="moonshot-v1-8k",
# )
# Chat-model client for the Moonshot AI API, accessed through its
# OpenAI-compatible endpoint.
# SECURITY: the API key is read from the MOONSHOT_API_KEY environment
# variable instead of being hard-coded in source; the original literal
# "key" placeholder is kept as the fallback for backward compatibility.
chat_model = ChatOpenAI(
    openai_api_key=os.getenv("MOONSHOT_API_KEY", "key"),
    openai_api_base="https://api.moonshot.cn/v1",
    model="moonshot-v1-8k",  # Moonshot 8k-context chat model
    temperature=0,           # deterministic sampling
    request_timeout=60,      # seconds before an HTTP request times out
    max_retries=3,           # retry transient API failures up to 3 times
)
# print(chat_model.invoke("你好"))
# messages=[
#     HumanMessage(content="今天的天气如何？")
# ]
# print(chat_model.invoke(messages))

# system_template="你是一个语言助手，可以将{input_language}翻译为{output_language}。"
# system_message_prompt=SystemMessagePromptTemplate.from_template(system_template)
# human_template="{text}"
# human_message_prompt=HumanMessagePromptTemplate.from_template(human_template)
#
# chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# messages=chat_prompt.format_messages(input_language="中文", output_language="日文", text="你的名字是什么？")
# print(messages)

# vectorstore = FAISS.from_texts(
#     ["harrison worked at kensho"], embedding=HuggingFaceEmbeddings()
# )
# retriever = vectorstore.as_retriever()
# template = """Answer the question based only on the following context:
# {context}
#
# Question: {question}
# """
# prompt = ChatPromptTemplate.from_template(template)
# model = ChatOpenAI()
#
# retrieval_chain = (
#     {"context": retriever, "question": RunnablePassthrough()}
#     | prompt
#     | model
#     | StrOutputParser()
# )
#
# retrieval_chain.invoke("where did harrison work?")