from langchain.chains.llm import LLMChain
from langchain_community.vectorstores import Chroma
from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, FewShotPromptTemplate, ChatPromptTemplate, \
    HumanMessagePromptTemplate, FewShotChatMessagePromptTemplate, PipelinePromptTemplate, load_prompt, \
    MessagesPlaceholder
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings, GoogleGenerativeAI
import os
import sys

# --- Environment / model setup ---------------------------------------------
# NOTE(security): API keys are hard-coded below. Anything committed like this
# must be treated as leaked and rotated; prefer supplying keys via the real
# environment. setdefault() (instead of unconditional assignment) at least
# lets a key already present in the environment win over the checked-in one.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGCHAIN_PROJECT", "playground")
os.environ.setdefault("LANGCHAIN_API_KEY", "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")
os.environ.setdefault("GOOGLE_API_KEY", "AIzaSyBJoz7BvdFgWTBwzcu-0xWpJKfEJOR6vPM")

# Shared model instances used by every demo below:
# `chat` yields message objects (has .content), `llm` yields plain strings.
chat = ChatGoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0.7)
llm = GoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0.7)

def prompt_template_simple_demo():
    """Demonstrate the simplest prompt -> LLM pipeline using LCEL pipe syntax."""
    qa_template = (
        "Question: {question}\n"
        "\n"
        "    Answer: Let's think step by step."
    )
    qa_prompt = PromptTemplate.from_template(qa_template)

    pipeline = qa_prompt | llm

    answer = pipeline.invoke({"question": "why is 2+2 = 4?"})
    print(answer)


def template_from_demo():
    """Show the different ways of constructing templates: plain string vs chat."""
    # A plain string template; format() returns a str.
    simple = PromptTemplate.from_template("什么问题：{question}")
    rendered = simple.format(question="why is 2+2 = 4?")
    print(type(rendered))

    # A chat template built from (role, content) tuples.
    tuple_template = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful AI bot. Your name is {name}."),
        ("human", "Hello, how are you doing?"),
        ("ai", "I'm doing well, thanks!"),
        ("human", "{user_input}"),
    ])
    print(tuple_template.format_messages(name="Bob", user_input="What is your name?"))

    # A chat template mixing concrete Message objects with message templates.
    mixed_template = ChatPromptTemplate.from_messages(
        [
            # A concrete message may be placed here...
            SystemMessage(
                content=(
                    "You are a helpful AI bot. Your name is {name}."
                )
            ),
            # ...and so may a message *template*.
            HumanMessagePromptTemplate.from_template("Hello, how are you doing?"),
            AIMessage(content="I'm doing well, thanks!"),
            HumanMessage(content="{user_input}")
        ]
    )

    # Only the template entries are substituted; the literal Message objects
    # above keep their "{...}" text verbatim.
    print(mixed_template.format_messages(name="Bob", user_input="What is your name?"))
    # llm.invoke(mixed_template.format_messages(name="Bob", user_input="What is your name?"))

    # chain = LLMChain(llm=llm, prompt=mixed_template)  # deprecated style
    chain = mixed_template | llm  # preferred LCEL style
    print(chain.invoke({"name": "Bob", "user_input": "What is your name?"}))


# 携带例子的template
# A template that carries worked examples (few-shot prompting).
def prompt_template_few_shot():
    """Few-shot chat prompting: worked Q/A examples precede the real question.

    Fixes two bugs in the original demo:
    * the question was passed as ``input`` but the few-shot template had no
      slot for it, so the model never actually received the question;
    * the pipeline used the plain-text ``llm`` (which returns ``str``) while
      the code read ``result.content``, an attribute that exists only on the
      message objects returned by chat models.
    """
    examples = [
        {
            "question": "Who lived longer, Muhammad Ali or Alan Turing?",
            "answer":
                """
                Are follow up questions needed here: Yes.
                Follow up: How old was Muhammad Ali when he died?
                Intermediate answer: Muhammad Ali was 74 years old when he died.
                Follow up: How old was Alan Turing when he died?
                Intermediate answer: Alan Turing was 41 years old when he died.
                So the final answer is: Muhammad Ali
                """
        }
    ]

    # How each example is rendered into the conversation history.
    example_prompt = ChatPromptTemplate.from_messages(
        [
            ("human", "{question}"),
            ("ai", "{answer}"),
        ]
    )
    few_shot = FewShotChatMessagePromptTemplate(
        examples=examples,
        example_prompt=example_prompt
    )

    # Final prompt: examples first, then the user's actual question so that
    # the ``input`` variable really reaches the model.
    prompt = ChatPromptTemplate.from_messages(
        [
            few_shot,
            ("human", "{input}"),
        ]
    )

    # Use the chat model: it returns a message object, so .content is valid.
    chain = prompt | chat
    question = "Who lived longer, Muhammad Ali or Alan Turing?"
    result = chain.invoke({"input": question})
    print(result.content)


def template_few_shot_example_selector_similarity():
    """Select the semantically closest example and few-shot it into the prompt.

    Bug fixed: the original constructed the selector over a freshly created,
    *empty* Chroma store — ``examples`` was defined but never embedded/stored,
    so the selector could never return anything.  ``from_examples`` embeds and
    stores the examples before selecting over them.
    """
    examples = [
        {
            "question": "How's the weather today?",
            "answer":
                """
                 It's sunny today
                """
        },
        {
            "question": "What color clothes are you wearing today?",
            "answer":
                """
                 blue
                """
        },

    ]
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

    example_selector = SemanticSimilarityExampleSelector.from_examples(
        # The list of candidate examples to select from.
        examples,
        # The embedding model used to measure semantic similarity.
        embeddings,
        # The VectorStore class that stores the embeddings and is searched.
        Chroma,
        # How many examples to select per query.
        k=1
    )

    example_prompt = PromptTemplate(
        input_variables=["question", "answer"],
        template="Question: {question}\n{answer}",
    )
    prompt = FewShotPromptTemplate(
        example_selector=example_selector,
        example_prompt=example_prompt,
        suffix="Question: {input}",
        input_variables=["input"]
    )

    print(prompt.format_prompt(input="what to wear"))
    chain = prompt | llm
    print(chain.invoke({"input": "what to wear"}))


def template_prompt_partial():
    """Fill a template's variables one at a time via PromptTemplate.partial()."""
    prompt = PromptTemplate(
        template="我说的这是第一个{foo}，这是第二个{bar}",
        input_variables=["foo", "bar"],
    )
    # Each partial() call returns a new template with that variable pre-bound,
    # so by the end format() needs no arguments at all.
    step_one = prompt.partial(foo="foo")
    step_two = step_one.partial(bar="bar")
    print(step_two.format())

def template_prompt_pipeline():
    """Compose several sub-prompts into one final prompt with PipelinePromptTemplate."""
    final_prompt = PromptTemplate.from_template(
        "{introduction}\n"
        "\n"
        "    {example}\n"
        "\n"
        "    {start}"
    )

    introduction_prompt = PromptTemplate.from_template(
        "You are impersonating {person}."
    )
    example_prompt = PromptTemplate.from_template(
        "Here's an example of an interaction:\n"
        "\n"
        "    Q: {example_q}\n"
        "    A: {example_a}"
    )
    start_prompt = PromptTemplate.from_template(
        "Now, do this for real!\n"
        "\n"
        "    Q: {input}\n"
        "    A:"
    )

    pipeline_prompt = PipelinePromptTemplate(
        final_prompt=final_prompt,
        pipeline_prompts=[
            ("introduction", introduction_prompt),
            ("example", example_prompt),
            ("start", start_prompt),
        ],
    )

    # Which variables the caller still has to supply.
    print(pipeline_prompt.input_variables)
    print(pipeline_prompt.format_prompt(
        example_q="这是（example_q）",
        person="这是（person）",
        input="这是（input）",
        example_a="这是（example_a）",
    ))


def template_prompt_load():
    """Load a prompt template from a YAML file and return it.

    The original loaded the prompt and discarded it, making the helper a
    no-op.  Returning the template makes it usable; callers that ignored the
    previous ``None`` return are unaffected (backward compatible).
    """
    prompt = load_prompt("promptstore/simple_prompt.yaml")
    return prompt

def template_prompt_placeholder():
    """Use MessagesPlaceholder to splice a whole conversation into a chat prompt."""
    summarize_template = HumanMessagePromptTemplate.from_template(
        "Summarize our conversation so far in {word_count} words."
    )

    # The placeholder is expanded with whatever message list the caller
    # provides under the "conversation" variable.
    chat_prompt = ChatPromptTemplate.from_messages([
        MessagesPlaceholder(variable_name="conversation"),
        summarize_template,
    ])
    print(chat_prompt.format_prompt(
        conversation=["hi", "hello", "how are you?"],
        word_count="5",
    ))


def template_opt_code():
    """Run a fixed grammar-correction prompt through the chat model and parse to str."""
    correction_prompt = ChatPromptTemplate.from_messages([
        ("system", "You will be provided with statements, and your task is to convert them to standard English."),
        ("user", "She no went to the market."),
    ])

    # The | operator is equivalent to chaining .pipe() calls.
    chain = correction_prompt | chat | StrOutputParser()
    print(chain.invoke({}))

if __name__ == '__main__':
    # Entry point: runs the currently active demo; swap in any other
    # template_* function above to try a different example.
    template_opt_code()
