'''
Basic LLM usage in LangChain: initializes a remote OpenAI chat model via
init_chat_model, and runs a streaming chat example against Tongyi (DashScope).
'''

# LangSmith tracing configuration.
# Dashboard: https://smith.langchain.com/o/89358f56-ecf9-4b82-87a8-d098d7d16813/settings
import getpass


import os
# NOTE(review): tracing is enabled but LANGSMITH_API_KEY is not set here —
# confirm it is supplied via the shell environment, otherwise tracing will fail.
os.environ["LANGSMITH_TRACING"] = "true"
# os.environ["LANGSMITH_API_KEY"] = smith_key


# Remote LLM client (OpenAI).
# pip install -U "langchain[openai]"
## https://platform.openai.com/api-keys
# if not os.environ.get("OPENAI_API_KEY"):
#   os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter API key for OpenAI: ")
# os.environ["OPENAI_API_KEY"] = keyx
from langchain.chat_models import init_chat_model
# Build a chat model handle for OpenAI's gpt-4o-mini.
# NOTE(review): requires OPENAI_API_KEY in the environment — confirm it is set,
# since the in-file assignments above are all commented out.
model = init_chat_model("gpt-4o-mini", model_provider="openai")


# Tongyi (DashScope) streaming chat demo.
# Get an API key: https://help.aliyun.com/document_detail/611472.html?spm=a2c4g.2399481.0.0
# pip install dashscope --upgrade # httpserver
import os
# os.environ["DASHSCOPE_API_KEY"] = DASHSCOPE_API_KEY
# DASHSCOPE_API_KEY = os.getenv("DASHSCOPE_API_KEY")
# print(DASHSCOPE_API_KEY)
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.messages import HumanMessage

# NOTE(review): ChatTongyi reads DASHSCOPE_API_KEY from the environment —
# confirm it is set, since the in-file assignment above is commented out.
# streaming=True makes the underlying DashScope requests stream incrementally.
chatLLM = ChatTongyi(
    streaming=True,
)
# Fix: dropped the redundant `streaming=True` kwarg previously passed to
# `.stream()`. `.stream()` always yields chunks; the extra kwarg was forwarded
# to the model invocation and merely duplicated the constructor setting.
res = chatLLM.stream([HumanMessage(content="你好")])
# Print each streamed chunk as it arrives.
for chunk in res:
    print("chat resp:", chunk)


# # call
# from langchain_core.messages import HumanMessage, SystemMessage
# messages = [
#     SystemMessage("以海盗口吻回答问题"),
#     HumanMessage("你可以做什么"),
# ]
# res = model.invoke(messages)
# print(res)


# from langchain_core.prompts import ChatPromptTemplate
# system_template = "Translate the following from English into {language}"
# prompt_template = ChatPromptTemplate.from_messages(
#     [("system", system_template), ("user", "{text}")]
# )
# prompt = prompt_template.invoke({"language": "中文", "text": "hi!"})
# prompt