"""
FewShotPromptTemplate  与 PromptTemplate的使用

FewShotChatMessagePromptTemplate 与 ChatPromptTemplate的使用

Example selectors  实例选择器
"""
import os

import dotenv
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.prompts import PromptTemplate, FewShotPromptTemplate, ChatPromptTemplate,  FewShotChatMessagePromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.vectorstores import FAISS

from chapter01.AliyunDashScopeEmbeddings import AliyunDashScopeEmbeddings


#FewShotPromptTemplate
def fun_1():
    """Demonstrate FewShotPromptTemplate combined with a plain PromptTemplate.

    Renders a pool of input/output example pairs into a single few-shot
    prompt, formats it for a new input, and sends it to the chat model.
    """
    dotenv.load_dotenv()

    # Chat model — Qwen3 served through Alibaba's Bailian (DashScope) platform.
    model = ChatOpenAI(
        model=os.getenv("MODEL_NAME"),
        base_url=os.getenv("BASE_URL"),
        api_key=os.getenv("API_KEY"),
        temperature=0.8,  # sampling temperature
        max_tokens=1024,  # cap on response length
        streaming=True,   # stream tokens back
    )

    # Template used to render each individual example pair.
    example_template = PromptTemplate.from_template(template="input{input},output{output}")

    # Demonstration pairs the model should infer the pattern from.
    demo_pairs = [
        {"input": "小王", "output": "中国人"},
        {"input": "小李", "output": "中国人"},
        {"input": "小张", "output": "中国人"},
        {"input": "小赵", "output": "中国人"},
        {"input": "小刘", "output": "中国人"},
        {"input": "James", "output": "美国人"},
        {"input": "alex", "output": "美国人"},
        {"input": "ryan", "output": "美国人"},
        {"input": "christian", "output": "美国人"},
    ]

    few_shot = FewShotPromptTemplate(
        example_prompt=example_template,
        examples=demo_pairs,
        suffix="input:{input} , output:",  # template appended after the examples
        input_variables=["input"],
    )

    rendered = few_shot.format(input="特朗普")
    answer = model.invoke(rendered)
    print(answer.content)

#fun_1()

#FewShotChatMessagePromptTemplate
def fun_2():
    """Demonstrate FewShotChatMessagePromptTemplate with ChatPromptTemplate.

    Renders each example as a human/ai message pair, embeds the rendered
    examples in a final chat prompt, and queries the model with a new input.
    """
    dotenv.load_dotenv()

    # Chat model — Qwen3 served through Alibaba's Bailian (DashScope) platform.
    llm = ChatOpenAI(
        model=os.getenv("MODEL_NAME"),
        base_url=os.getenv("BASE_URL"),
        api_key=os.getenv("API_KEY"),
        temperature=0.8,  # sampling temperature
        max_tokens=1024,  # cap on response length
        streaming=True,   # stream tokens back
    )

    # Each example becomes one human message followed by one AI reply.
    per_example_prompt = ChatPromptTemplate.from_messages(
        [("human", "{input}"), ("ai", "{output}")]
    )

    # Demonstration pairs the model should infer the pattern from.
    samples = [
        {"input": "小王", "output": "中国人"},
        {"input": "小李", "output": "中国人"},
        {"input": "小张", "output": "中国人"},
        {"input": "小赵", "output": "中国人"},
        {"input": "小刘", "output": "中国人"},
        {"input": "James", "output": "美国人"},
        {"input": "alex", "output": "美国人"},
        {"input": "ryan", "output": "美国人"},
        {"input": "christian", "output": "美国人"},
    ]

    few_shot = FewShotChatMessagePromptTemplate(
        example_prompt=per_example_prompt,
        examples=samples,
    )

    # System message + rendered examples + the fresh user question.
    final_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "你是一个AI助手"),
            few_shot,
            ("human", "{input}"),
        ]
    )

    reply = llm.invoke(final_prompt.format(input="特朗普"))
    print(reply.content)

#fun_2()

#Example selectors  实例选择器  (pip install langchain-community -i https://pypi.tuna.tsinghua.edu.cn/simple , pip install faiss-cpu -i https://pypi.tuna.tsinghua.edu.cn/simple)
def fun_3():
    """Demonstrate SemanticSimilarityExampleSelector with FewShotPromptTemplate.

    Embeds an example pool, selects the k most semantically similar examples
    for a test input, then prints both the selected examples and the fully
    formatted few-shot prompt.

    Fixes vs. previous revision: the unused ChatOpenAI client (constructed but
    never invoked) is removed, and the previously commented-out prompt
    formatting is restored so ``similar_prompt`` is actually exercised.
    """
    dotenv.load_dotenv()

    # Template used to render each selected example.
    example_prompt = PromptTemplate(
        input_variables=["input", "output", "task"],
        template="任务类型: {task}\n输入: {input}\n输出: {output}"
    )

    # Example pool mixing two task types so similarity search has something
    # meaningful to discriminate between.
    examples = [
        {"input": "快乐", "output": "sad", "task": "反义词"},
        {"input": "高", "output": "short", "task": "反义词"},
        {"input": "充满活力", "output": "lethargic", "task": "反义词"},
        {"input": "晴朗", "output": "gloomy", "task": "反义词"},
        {"input": "什么是人工智能？", "output": "AI是模拟人类智能的计算机系统", "task": "问答"},
        {"input": "机器学习有哪些类型？", "output": "监督学习、无监督学习、强化学习", "task": "问答"},
        {"input": "LangChain是什么？", "output": "一个用于开发LLM应用的框架", "task": "问答"}
    ]

    # Embedding model (Aliyun DashScope).
    embeddings = AliyunDashScopeEmbeddings(model="text-embedding-v4")

    # Semantic-similarity selector: embed the pool into a FAISS index and
    # keep the k=2 examples closest to the query.
    example_selector = SemanticSimilarityExampleSelector.from_examples(
        examples, embeddings, FAISS, k=2
    )

    # Few-shot template that pulls its examples from the selector at format time.
    similar_prompt = FewShotPromptTemplate(
        example_prompt=example_prompt,
        example_selector=example_selector,
        prefix="请根据已有的示例，给出正确的结果。",
        suffix="任务类型: {task_type}\n输入: {user_input}\n输出:",  # trailing prompt template
        input_variables=["user_input", "task_type"]
    )

    # Show which examples the selector picks for the test input.
    test_input = "什么是深度学习？"
    selected_examples = example_selector.select_examples({"input": test_input})
    print("=== 选择的示例 ===")
    for i, example in enumerate(selected_examples):
        print(f"示例 {i + 1}: {example}")

    # Format and show the complete few-shot prompt.
    formatted_prompt = similar_prompt.format(
        user_input=test_input,
        task_type="问答"
    )
    print("\n=== 生成的完整提示 ===")
    print(formatted_prompt)

fun_3()