#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author   : Uyynot
# @Email    : uyynot@qq.com
# @Time     : 2025/7/22 9:38
# @File     : demo2.py
# @Project  : demoProject
# @Desc     :
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableMap, RunnableLambda
from langchain_openai import ChatOpenAI

# Chat model backed by Alibaba Cloud DashScope's OpenAI-compatible endpoint
# (API key is expected from the environment — presumably DASHSCOPE_API_KEY or
# OPENAI_API_KEY; not visible here, confirm with deployment config).
llm = ChatOpenAI(
    model="qwen-plus",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    temperature=0.5,
)
# 1. 常规对话
# messages = [
#     (
#         "system",
#         "You are a helpful assistant that translates English to Chinese. Translate the user sentence.",
#     ),
#     ("human", "I love programming."),
# ]
# ai_msg = llm.invoke(messages)


# 2. 使用提示词模板
# prompt_template = ChatPromptTemplate.from_messages(
#     [
#         (
#             "system",
#             "You are a helpful assistant that translates Chinese to {language}. Translate the user sentence.",
#         ),
#         ("human", "{text}"),
#     ]
# )
#
# # prompt = prompt_template.format_prompt(language="English", text="你好，中国")
# prompt = prompt_template.invoke({"language":"English", "text":"你好，中国"})
# ai_msg = llm.invoke(prompt)
# print(ai_msg)
# # content='Hello, China' additional_kwargs={'refusal': None} response_metadata={'token_usage': {'completion_tokens': 3, 'prompt_tokens': 36, 'total_tokens': 39, 'completion_tokens_details': None, 'prompt_tokens_details': {'audio_tokens': None, 'cached_tokens': 0}}, 'model_name': 'qwen-plus', 'system_fingerprint': None, 'id': 'chatcmpl-2f3ea309-6e80-9a4f-8483-7a7cf17ec003', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None} id='run--37faaea7-ea8e-46a5-878f-4f5d8c8d6301-0' usage_metadata={'input_tokens': 36, 'output_tokens': 3, 'total_tokens': 39, 'input_token_details': {'cache_read': 0}, 'output_token_details': {}}


# 3. 使用chain
# 提示词模板
# prompt_template = ChatPromptTemplate.from_messages(
#     [
#         (
#             "system",
#             "You are a helpful assistant that translates Chinese to {language}. Translate the user sentence.",
#         ),
#         ("human", "{text}"),
#     ]
# )
# parser = StrOutputParser()
# # chain = prompt_template | llm
# chain = prompt_template | llm | parser
# ai_msg = chain.invoke({"language":"English", "text":"你好，中国"})
# # 'Hello, China'
#
# # 在上述基础上构建更复杂的链
# prompt_template2 = ChatPromptTemplate.from_template("我应该怎么回答这句话？{talk}。给我一个10个字以内的示例。")
# # 首先chain将语言翻译成对应语言；然后将chain的翻译结果作为参数传入prompt_template2，让大模型给出对应的答案
# chain2 = {"talk":chain } | prompt_template2 | llm | parser
# response = chain2.invoke({"text":"今天天气怎么样？", "language":"English"})
# # '今天天气晴朗。'
# print(33)

# 并行执行chain
# # 两个不同语言翻译的提示词模板
# prompt_template_en = ChatPromptTemplate.from_messages((
#     ('system', 'Translate the following from Chinese into English'),
#     ('user', '{text}')
# ))
# prompt_template_jp = ChatPromptTemplate.from_messages((
#     ('system', 'Translate the following from Chinese into Japanese'),
#     ('user', '{text}')
# ))
# # 解析器，取出content
# parser = StrOutputParser()
#
# # 构建两个实现不同翻译的chain
# chain_en = prompt_template_en | llm | parser
# chain_jp = prompt_template_jp | llm | parser
#
# # 并行执行两个chain
# parallel_chain = RunnableMap({"en": chain_en, "jp": chain_jp})
#
# # 合并最后大模型的输出
# # final_chain = parallel_chain | RunnableLambda(lambda x: f'en: {x["en"]}, jp:{x["jp"]}')
# final_chain = parallel_chain | RunnableLambda(lambda x: {"English": x["en"], "Japanese": x["jp"]})
#
# res = final_chain.invoke({"text": "你好，中国"})
# print(res)
#
# final_chain.get_graph().print_ascii()

# 保存聊天记录实现多轮会话
# 基于内存管理历史消息