"""
File   : chat_prompt_templates.py
Desc   : 对话提示模板
Date   : 2024/11/17
Author : Tianyu Chen
"""
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

# Chat model shared by both the direct-invoke and LCEL examples below.
# NOTE(review): relies on OPENAI_API_KEY being set in the environment.
chat_model = ChatOpenAI()

# Two-role chat template: a system instruction parameterized by the source and
# target languages, followed by the user's text to translate.
prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个能将{input_language}翻译成{output_language}的助手。"),
        ("user", "{text}"),
    ]
)

# Fill in the template variables, yielding a PromptValue (Chinese -> English,
# text: "I love programming").
prompt_value = prompt_template.invoke(
    {
        "input_language": "中文",
        "output_language": "英文",
        "text": "我爱编程",
    }
)


if __name__ == "__main__":
    # --- The PromptValue object --------------------------------------------
    # print(prompt_value)
    # -> messages=[SystemMessage(content='你是一个能将中文翻译成英文的助手。', ...),
    #              HumanMessage(content='我爱编程', ...)]

    # --- PromptValue converted to a list of Messages -----------------------
    # print(prompt_value.to_messages())
    # -> [SystemMessage(content='你是一个能将中文翻译成英文的助手。', ...),
    #     HumanMessage(content='我爱编程', ...)]

    # --- Plain invocation: pass the PromptValue straight to the model ------
    # print(chat_model.invoke(prompt_value))
    # -> AIMessage(content='I love programming.') with response_metadata
    #    (model 'gpt-3.5-turbo-0125', finish_reason 'stop') and
    #    usage_metadata (36 input tokens, 4 output tokens, 40 total).

    # --- LCEL chained invocation -------------------------------------------
    # Piping the template into the model builds a runnable that formats the
    # prompt and calls the chat model in a single invoke().
    translation_chain = prompt_template | chat_model
    reply = translation_chain.invoke(
        {"input_language": "中文", "output_language": "英文", "text": "我爱编程"}
    )
    # print(reply.content)  # I love programming.
    print(reply)
    # -> AIMessage(content='I love coding.') with the same shape of
    #    response_metadata / usage_metadata as the plain invocation above
    #    (36 input tokens, 4 output tokens, 40 total).
