from operator import itemgetter

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI

import os

# Chat model: DeepSeek's OpenAI-compatible endpoint, driven through the
# ChatOpenAI client. Credentials/endpoint come from the environment
# (DEEPSEEK_API_KEY / DEEPSEEK_BASE_URL); both default to None if unset.
model = ChatOpenAI(model="deepseek-chat",
                   api_key=os.environ.get('DEEPSEEK_API_KEY'),
                   base_url=os.environ.get('DEEPSEEK_BASE_URL'))

# Sample chat transcript in OpenAI message format. The last entry is the
# user's pending question; everything before it is prior conversation.
chatHistory = [
    {"role": "user", "content": "公司的病假政策是什么？"},
    {"role": "assistant", "content": "公司的病假政策允许员工每年请一定天数的病假。详情及资格标准请参阅员工手册。"},
    {"role": "user", "content": "如何提交病假请求？"}
]


def extract_history(chatHistory):
    """Return every message in the transcript except the most recent one."""
    last_index = len(chatHistory) - 1
    return chatHistory[:last_index]

def extract_question(chatHistory):
    """Return the text content of the most recent message in the transcript."""
    latest_message = chatHistory[-1]
    return latest_message["content"]

# Prompt for an HR-assistant chatbot; {chatHistory} and {question} are
# filled in from the transcript at invocation time.
# NOTE(review): fixed two typos in the original Chinese prompt text —
# "同在" -> "现在", and the garbled refusal clause
# "如果你不知道或这个问题或与人力资源问题无关" rewritten to read correctly.
promptMessageStr = """
您是一个人力资源助理聊天机器人，请只回答与人力资源相关的问题。如果你不知道答案，或这个问题与人力资源无关，请不要回答。
这是你与用户的历史聊天记录: {chatHistory}
现在，请回答这个问题: {question}
"""
# Build the prompt template (no similarity search happens here — the
# original comment claiming so was misleading).
prompt = PromptTemplate(input_variables=["chatHistory", "question"], template=promptMessageStr)

# Approach 1: split history/question in Python, then run prompt -> model -> string.
chain = prompt | model | StrOutputParser()
# Streaming variant of the same call:
# for chunk in chain.stream({"chatHistory": extract_history(chatHistory), "question": extract_question(chatHistory)}):
#     print(chunk, end="", flush=True)
print(chain.invoke({"chatHistory": extract_history(chatHistory), "question": extract_question(chatHistory)}))

# Approach 2: let the chain split the raw message list itself, using
# itemgetter("messages") piped into RunnableLambda wrappers.
# chain = {"chatHistory": itemgetter("messages") | RunnableLambda(extract_history),
#          "question": itemgetter("messages") | RunnableLambda(extract_question)} | prompt | model | StrOutputParser()
#
# for chunk in chain.stream({"messages": chatHistory}):
#     print(chunk, end="", flush=True)