from itertools import cycle

from langchain_core.language_models import GenericFakeChatModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# Prompt template: a system message carrying the requested answer style,
# followed by the user's question.
_prompt_messages = [
    ("system", "You are a helpful assistant that responds in the style of {answer_style}."),
    ("human", "{question}"),
]
prompt = ChatPromptTemplate.from_messages(_prompt_messages)

# Fake chat model with canned replies for testing.
# NOTE: the original used iter([...]), which is exhausted after four
# invocations and makes every later chain.invoke raise StopIteration.
# cycle() keeps the same four responses but repeats them indefinitely.
llm = GenericFakeChatModel(messages=cycle([
    "Ahoy there! What be troublin' ye today?",  # pirate style
    "Greetings! How may I assist you?",  # formal style
    "Hey! What's up?",  # casual style
    "Salutations! How can I be of service?"  # classic style
]))

# Compose the pipeline: prompt -> fake LLM -> plain-string output.
chain = prompt | llm | StrOutputParser()


# Interactive test loop for the chain.
def chat_with_chain():
    """Interactive REPL: read a style and a question, print the chain's reply.

    Loops until the user types quit/exit/bye, presses Ctrl+C, or closes
    stdin (Ctrl+D).
    """
    print("=== 风格化聊天机器人 ===")
    print("可用的回答风格: pirate, formal, casual, classic")

    while True:
        try:
            # Read the desired answer style and the question from stdin.
            style = input("\n选择回答风格: ").strip()
            question = input("请输入您的问题: ").strip()

            if question.lower() in ['quit', 'exit', 'bye']:
                print("再见！")
                break

            # Fill both template variables and run the chain.
            response = chain.invoke({
                "answer_style": style,
                "question": question
            })

            print(f"\n🤖 回答 ({style}风格): {response}")

        except (KeyboardInterrupt, EOFError):
            # Ctrl+C or closed stdin: exit cleanly. (EOFError previously fell
            # into the broad Exception handler and looped forever on input().)
            print("\n\n聊天结束！")
            break
        except Exception as e:
            print(f"错误: {e}")


# Expose the chain as a callable tool (optional usage).
as_tool = chain.as_tool(
    name="StyleResponder",
    description="回答问题并根据指定风格进行回复。输入应该包含 'answer_style' 和 'question' 参数。"
)

print("工具参数结构:", as_tool.args)
print("\n开始聊天...")
# chat_with_chain()

# Dump the tool's metadata, one attribute per line.
for tool_attr in (as_tool.name, as_tool.description, as_tool.args):
    print(tool_attr)
