import os
from langchain import ConversationChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain.memory import ConversationBufferMemory


# API keys required by the examples below (OpenAI LLMs and SerpAPI Google search).
# NOTE(review): do not commit real keys — export them in your shell instead.
# setdefault is used so a key that is already present in the environment is
# NOT clobbered by the "..." placeholder; when unset, behavior is unchanged.
os.environ.setdefault("OPENAI_API_KEY", "...")
os.environ.setdefault("SERPAPI_API_KEY", "...")  # key for SerpAPI (Google search)

# LLM initialization and invocation
# llm = OpenAI(temperature=0.9)
# text = "What would be a good company name for a company that makes colorful socks?"
# print(llm(text))


# Prompt templates (PromptTemplate): manage prompts for the LLM
# prompt = PromptTemplate(
#     input_variables=["product"],
#     template="What is a good name for a company that makes {product}?",
# )
# print(prompt.format(product="colorful socks"))


# Chains: combine LLMs and prompts in multi-step workflows
# llm = OpenAI(temperature=0.9)
# prompt = PromptTemplate(
#     input_variables=["product"],
#     template="What is a good name for a company that makes {product}?",
# )
# chain = LLMChain(llm=llm, prompt=prompt)
# chain.run("colorful socks")


# Agents: dynamically invoke chains based on user input
# llm = OpenAI(temperature=0)
# tools = load_tools(["serpapi", "llm-math"], llm=llm)
# agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# agent.run("What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?")


# Memory: add state to chains and agents
# llm = OpenAI(temperature=0)
# conversation = ConversationChain(llm=llm, verbose=True)
# output = conversation.predict(input="Hi there!")
# print(output)


# Get message completions from a chat model
# chat = ChatOpenAI(temperature=0)
# chat([HumanMessage(content="Translate this sentence from English to French. I love programming.")])
# messages = [ # multiple messages
#     SystemMessage(content="You are a helpful assistant that translates English to French."),
#     HumanMessage(content="Translate this sentence from English to French. I love programming.")
# ]
# chat(messages)
# batch_messages = [ # batched messages
#     [
#         SystemMessage(content="You are a helpful assistant that translates English to French."),
#         HumanMessage(content="Translate this sentence from English to French. I love programming.")
#     ],
#     [
#         SystemMessage(content="You are a helpful assistant that translates English to French."),
#         HumanMessage(content="Translate this sentence from English to French. I love artificial intelligence.")
#     ],
# ]
# result = chat.generate(batch_messages)
# result
# result.llm_output['token_usage'] # token usage stats


# Chat prompt templates
# chat = ChatOpenAI(temperature=0)
# template = "You are a helpful assistant that translates {input_language} to {output_language}."
# system_message_prompt = SystemMessagePromptTemplate.from_template(template)
# human_template = "{text}"
# human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
# chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages())


# Agents backed by chat models
# chat = ChatOpenAI(temperature=0)
# llm = OpenAI(temperature=0)
# tools = load_tools(["serpapi", "llm-math"], llm=llm)
# agent = initialize_agent(tools, chat, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
# agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?")


# Memory: add conversational state to chains and agents
# prompt = ChatPromptTemplate.from_messages([
#     SystemMessagePromptTemplate.from_template("The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."),
#     MessagesPlaceholder(variable_name="history"),
#     HumanMessagePromptTemplate.from_template("{input}")
# ])
# llm = ChatOpenAI(temperature=0)
# memory = ConversationBufferMemory(return_messages=True)
# conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)
# conversation.predict(input="Hi there!")
# conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
# conversation.predict(input="Tell me about yourself.")