from langchain_community.llms import Ollama  # 使用 LangChain 的 Ollama 集成
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain.schema import ChatPromptValue

# Configuration for the local Ollama API endpoint and model name.
# Assumes a DeepSeek model has already been deployed via Ollama under this name.
model_name = "deepseek-r1:7b"  # replace with the DeepSeek model name you deployed in Ollama
base_url = "http://localhost:11434"  # Ollama's default local API address

# Initialize the Ollama LLM client (no network request is made until invoke()).
model = Ollama(model=model_name, base_url=base_url)

# Prompt template: the system message is parameterized by {language},
# the user message by {input}; both are filled in at invoke() time.
system_template = "Translate the following from English into {language}"
prompt = ChatPromptTemplate.from_messages(
    [("system", system_template), ("user", "{input}")]
)

# Compose the chain with LCEL pipe syntax: prompt -> model -> plain-string parser.
chain = prompt | model | StrOutputParser()

# Run the chain once; this performs the HTTP request to the Ollama server.
response = chain.invoke({
    "language": "Chinese",
    "input": "Hello, can you tell me about yourself?",
})

print(response)