import threading

from langchain.schema import retriever
from langchain.schema.runnable.base import RunnableMap
from langchain_community.vectorstores import Chroma

from langchain_core.example_selectors import SemanticSimilarityExampleSelector
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage

from langchain_core.output_parsers import StrOutputParser, BaseOutputParser
from langchain_core.runnables import RunnableSequence
from langchain_core.vectorstores import VectorStore
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_community.document_loaders import WebBaseLoader
from langchain.prompts import PromptTemplate
from langchain.chains import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, SystemMessagePromptTemplate, \
    HumanMessagePromptTemplate, FewShotPromptTemplate, FewShotChatMessagePromptTemplate, ChatMessagePromptTemplate, \
    MessagesPlaceholder

from langchain_experimental.pal_chain import PALChain
import requests
from IPython.display import Image
import logging as log
import json

from redis.client import Redis

# Initialize Model
# Shared chat model used by every demo below; temperature 0.7 allows some creativity.
llm = ChatGoogleGenerativeAI(model="models/gemini-1.5-pro-latest", temperature=0.7)
# Verbose root-logger config with source line numbers, handy when tracing the demos.
log.basicConfig(level=log.DEBUG, format='%(asctime)s - %(levelname)s - Line %(lineno)d - %(message)s')


######## Example walkthrough: https://blog.csdn.net/weixin_42608414/article/details/135030432

#### Reference docs: https://python.langchain.com/docs/integrations/chat/google_generative_ai/#streaming-and-batching
# Load the blog


def test1():
    """Summarize a blog post with a stuff-documents summarization chain."""
    # Fetch the article and turn it into LangChain documents.
    docs = WebBaseLoader(
        "https://yylives.cc/2024/03/30/tutorial-using-langchain-and-gemini-to-summarize-articles/"
    ).load()

    # Single-variable summarization prompt (content kept verbatim).
    summary_template = """写出以下内容的简洁摘要:
    "{text}"
    CONCISE SUMMARY:"""

    summarize_chain = StuffDocumentsChain(
        llm_chain=LLMChain(llm=llm, prompt=PromptTemplate.from_template(summary_template)),
        document_variable_name="text",
    )

    # Run the chain over all documents and print the generated summary.
    summary = summarize_chain.invoke(docs)
    print(summary["output_text"])


def test2():
    """Ask the model a single question and print the reply text."""
    answer = llm.invoke("LLM 是什么？")
    print(answer.content)


def test3():
    """Stream the model's answer and print each chunk as it arrives."""
    for piece in llm.stream("写一首关于程序员苦逼诗。"):
        print(piece.content)
        print("---------------------")


def test4():
    """Send two prompts in one batched call and print each answer."""
    questions = ["2+2等于几?", "3+5等于几?"]
    for answer in llm.batch(questions):
        print(answer.content)


def test5():
    """Compose prompt | model | parser with LCEL and run it once."""
    joke_prompt = ChatPromptTemplate.from_template("给我讲一个关于{topic}的笑话")
    pipeline = joke_prompt | llm | StrOutputParser()
    print(pipeline.invoke({"topic": "躺平"}))


def test6_embed():
    """Embed one sentence with the Gemini embedding model and show the vector."""
    embedder = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector = embedder.embed_query("Gemini Pro 是 GoogleDeepMind 开发的大型语言模型。")
    # Print the raw embedding and its dimensionality.
    print(vector)
    print(len(vector))


def test7():
    """Answer a question constrained to a caller-supplied context string."""
    # Prompt template (content preserved verbatim, including original wording).
    qa_template = """Answer the question a a full sentence, based only on the following context:
    {context}
    Question: {question}
    """

    # template -> prompt -> model -> plain-string output
    qa_chain = ChatPromptTemplate.from_template(qa_template) | llm | StrOutputParser()
    print(qa_chain.invoke({"question": "谁开发了 Gemini Pro?", "context": "什么内容都可以，我知道程序员开发的"}))


def test8():
    """Solve two word problems with a program-aided-language (PAL) chain."""
    solver = PALChain.from_math_prompt(llm, verbose=True)

    for question in (
        "食堂有23个苹果。如果午餐用了20个，之后又买了6个，那么食堂最后还剩多少个苹果？",
        "如果小明早上 7:00 起床，并且他在家花了 1 小时吃早餐，然后又花了 30 分钟步行去学校，小明几点到的学校？",
    ):
        print(solver.invoke(question))


def test9():
    """Send a multimodal (text + image) message to the model."""
    image_url = "https://upload.wikimedia.org/wikipedia/commons/e/e7/Everest_North_Face_toward_Base_Camp_Tibet_Luca_Galuzzi_2006.jpg"
    content = requests.get(image_url).content
    # NOTE(review): the Image object is built but never displayed or assigned;
    # this only has a visible effect inside a Jupyter notebook. Kept for parity.
    Image(content, width=300)

    # One human message carrying both a text part and an image-url part.
    multimodal_message = HumanMessage(
        content=[
            {
                "type": "text",
                "text": "这个图片里有什么,它位于什么地方？此情此景吟诗一首",
            },
            {
                "type": "image_url",
                "image_url": image_url
            },
        ]
    )
    print(llm.invoke([multimodal_message]).content)


def test10():
    """Translate a sentence using a system + human message pair."""
    result = llm.invoke([
        SystemMessage(content="You are a helpful assistant that translates English to Chinese."),
        HumanMessage(content="Translate this sentence from English to Chinese. I love programming."),
    ])
    print(result)


def test11():
    """Ask for a comma-separated list and run it through the custom parser."""
    # System instruction (content preserved verbatim).
    system_template = """You are a helpful assistant who generates comma separated lists.
    A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.
    ONLY return a comma separated list, and nothing more."""

    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(system_template),
        HumanMessagePromptTemplate.from_template("{text}"),
    ])

    # prompt -> model -> parser; CommaSeparatedListOutputParser is defined
    # later in this module and resolved at call time.
    pipeline = chat_prompt | llm | CommaSeparatedListOutputParser()
    log.info(pipeline.invoke({"text": "colors"}))



class CommaSeparatedListOutputParser(BaseOutputParser):
    """Parse the output of an LLM call to a comma-separated list."""

    def parse(self, text: str) -> list[str]:
        """Split the raw model output on commas into a list of trimmed items.

        Bug fix: the previous implementation split on newlines, so a reply
        such as "red, green, blue" (what the prompt in test11 asks for) came
        back as a single-element list instead of ["red", "green", "blue"].
        """
        return [item.strip() for item in text.strip().split(",")]

def read_file(file_path):
    """Load and return the JSON content of ``file_path``.

    Args:
        file_path: Path to a UTF-8 encoded JSON file.

    Returns:
        The deserialized Python object (dict, list, ...).
    """
    # Explicit encoding: JSON files are UTF-8 by convention; relying on the
    # platform default locale encoding breaks on Windows (e.g. cp936/cp1252).
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)

def test12():
    """Grand tour of LangChain prompt templates.

    Covers: PromptTemplate, ChatPromptTemplate, few-shot prompts (string and
    chat flavors), semantic-similarity example selectors backed by Chroma,
    jinja2 vs f-string template formats, ChatMessagePromptTemplate with a
    custom role, and MessagesPlaceholder for conversation history.
    """
    prompt_template = PromptTemplate.from_template(
        "Tell me a {adjective} joke about {content}."
    )
    result = prompt_template.format(adjective="funny", content="chickens")
    log.info(result)
    # NOTE(review): named "invalid" because input_variables omits {content};
    # whether this raises depends on the installed langchain version's validation.
    invalid_prompt = PromptTemplate(
        input_variables=["adjective"],
        template="Tell me a {adjective} joke about {content}."
    )
    log.info(invalid_prompt.input_variables[0])

    # Chat template built from (role, text) tuples.
    template = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful AI bot. Your name is {name}."),
        ("human", "Hello, how are you doing?"),
        ("ai", "I'm doing well, thanks!"),
        ("human", "{user_input}"),
    ])

    messages = template.format_messages(
        name="Bob",
        user_input="What is your name?"
    )
    log.info(messages)

    # Mixing a literal SystemMessage with a templated human message.
    template = ChatPromptTemplate.from_messages(
        [
            SystemMessage(
                content=(
                    "You are a helpful assistant that re-writes the user's text to "
                    "sound more upbeat."
                )
            ),
            HumanMessagePromptTemplate.from_template("{text}"),
        ]
    )
    result = llm.invoke(template.format_messages(text="i dont like eating tasty things."))
    log.info(result)
    # Few-shot question/answer examples in the self-ask-with-search format.
    examples = [
        {
            "question": "Who lived longer, Muhammad Ali or Alan Turing?",
            "answer":
                """
                Are follow up questions needed here: Yes.
                Follow up: How old was Muhammad Ali when he died?
                Intermediate answer: Muhammad Ali was 74 years old when he died.
                Follow up: How old was Alan Turing when he died?
                Intermediate answer: Alan Turing was 41 years old when he died.
                So the final answer is: Muhammad Ali
                """
        },
        {
            "question": "When was the founder of craigslist born?",
            "answer":
                """
                Are follow up questions needed here: Yes.
                Follow up: Who was the founder of craigslist?
                Intermediate answer: Craigslist was founded by Craig Newmark.
                Follow up: When was Craig Newmark born?
                Intermediate answer: Craig Newmark was born on December 6, 1952.
                So the final answer is: December 6, 1952
                """
        },
        {
            "question": "Who was the maternal grandfather of George Washington?",
            "answer":
                """
                Are follow up questions needed here: Yes.
                Follow up: Who was the mother of George Washington?
                Intermediate answer: The mother of George Washington was Mary Ball Washington.
                Follow up: Who was the father of Mary Ball Washington?
                Intermediate answer: The father of Mary Ball Washington was Joseph Ball.
                So the final answer is: Joseph Ball
                """
        },
        {
            "question": "Are both the directors of Jaws and Casino Royale from the same country?",
            "answer":
                """
                Are follow up questions needed here: Yes.
                Follow up: Who is the director of Jaws?
                Intermediate Answer: The director of Jaws is Steven Spielberg.
                Follow up: Where is Steven Spielberg from?
                Intermediate Answer: The United States.
                Follow up: Who is the director of Casino Royale?
                Intermediate Answer: The director of Casino Royale is Martin Campbell.
                Follow up: Where is Martin Campbell from?
                Intermediate Answer: New Zealand.
                So the final answer is: No
                """
        }
    ]


    example_prompt = PromptTemplate(input_variables=["question", "answer"], template="Question: {question}\n{answer}")

    log.info(example_prompt.format(**examples[0]))

    # String few-shot prompt: renders every example plus a final question suffix.
    prompt = FewShotPromptTemplate(
        examples=examples,
        example_prompt=example_prompt,
        suffix="Question: {input}",
        input_variables=["input"]
    )

    log.info(prompt.format(input="Who was the father of Mary Ball Washington?"))

    example_selector = SemanticSimilarityExampleSelector.from_examples(
        # The list of examples available to select from.
        examples,
        # The embedding class used to produce embeddings that measure semantic similarity.
        GoogleGenerativeAIEmbeddings(model="models/embedding-001"),
        # The VectorStore class used to store the embeddings and run the similarity search.
        Chroma,
        # The number of examples to produce.
        k=1
    )

    # Select the example most similar to the input question.
    question = "Royale from the same country?"
    selected_examples = example_selector.select_examples({"question": question})
    log.info(f"Examples most similar to the input: {question}")
    for example in selected_examples:

        for k, v in example.items():
            log.info(f"{k}: {v}")
    # Chat-style few-shot examples: each example becomes a human/AI message pair.
    examples = [
        {"input": "2+2", "output": "4"},
        {"input": "2+3", "output": "5"},
    ]

    # Prompt template used to format each individual example.
    example_prompt = ChatPromptTemplate.from_messages(
        [
            ("human", "{input}"),
            ("ai", "{output}"),
        ]
    )

    few_shot_prompt = FewShotChatMessagePromptTemplate(
        example_prompt=example_prompt,
        examples=examples,
    )

    log.info(few_shot_prompt.format())
    final_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are wonderous wizard of math."),
            few_shot_prompt,
            ("human", "{input}"),
        ]
    )
    chain = final_prompt | llm
    result = chain.invoke({"input": "我这是在干嘛？"})
    log.info(result.content)

    examples = [
        {"input": "2+2", "output": "4"},
        {"input": "2+3", "output": "5"},
        {"input": "2+4", "output": "6"},
        {"input": "3+3", "output": "7"},
        {"input": "how is the weather today", "output": "very good"},
        {"input": "What did the cow say to the moon?", "output": "nothing at all"},
        {
            "input": "Write me a poem about the moon",
            "output": "One for the moon, and one for me, who are we to talk about the moon?",
        },
    ]

    # Because examples are selected by semantic similarity, the vector store
    # must be populated with them first.
    to_vectorize = [" ".join(example.values()) for example in examples]
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vectorstore = Chroma.from_texts(to_vectorize, embeddings, metadatas=examples)
    result = vectorstore.search('2+4', search_type='similarity')
    log.info(result)

    # With the vector store in place, build a selector that returns the top 2 hits.
    example_selector = SemanticSimilarityExampleSelector(
        vectorstore=vectorstore,
        k=2,
    )

    # The prompt template loads examples by passing the input to `select_examples`.
    result = example_selector.select_examples({"input": "horse"})
    log.info(result)

    # Define the few-shot chat prompt.
    few_shot_prompt = FewShotChatMessagePromptTemplate(
        # The input variables select the values to pass to the example_selector.
        input_variables=["input"],
        example_selector=example_selector,
        # Each selected example is rendered as 2 messages: 1 human and 1 AI.
        example_prompt=ChatPromptTemplate.from_messages(
            [("human", "{input}"), ("ai", "{output}")]
        ),
    )
    final_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "you are a weather expert"),
            few_shot_prompt,
            ("human", "{input}"),
        ]
    )
    log.info(few_shot_prompt.format(input="What's 3+3?"))
    chain = final_prompt | llm
    result = chain.invoke({"input": "how is the weather today"})
    log.info(result.content)

    # Same template expressed in the jinja2 format...
    jinja2_template = "Tell me a {{ adjective }} joke about {{ content }}"
    prompt = PromptTemplate.from_template(jinja2_template, template_format="jinja2")

    message = prompt.format(adjective="funny", content="chickens")
    log.info(message)

    # ...and in the default f-string format.
    fstring_template = """Tell me a {adjective} joke about {content}"""
    prompt = PromptTemplate.from_template(fstring_template)

    message = prompt.format(adjective="funny", content="chickens")
    log.info(message)

    # A chat message template with an arbitrary (custom) role name.
    prompt = "May the {subject} be with you"

    chat_message_prompt = ChatMessagePromptTemplate.from_template(role="Jedi", template=prompt)
    message = chat_message_prompt.format(subject="force")
    log.info(message)

    # MessagesPlaceholder splices an entire conversation history into the prompt.
    human_prompt = "Summarize our conversation so far in {word_count} words."
    human_message_template = HumanMessagePromptTemplate.from_template(human_prompt)

    chat_prompt = ChatPromptTemplate.from_messages([MessagesPlaceholder(variable_name="conversation"), human_message_template])
    log.info(chat_prompt)

    human_message = HumanMessage(content="What is the best way to learn programming?")
    ai_message = AIMessage(content="""\
    1. Choose a programming language: Decide on a programming language that you want to learn.

    2. Start with the basics: Familiarize yourself with the basic programming concepts such as variables, data types and control structures.

    3. Practice, practice, practice: The best way to learn programming is through hands-on experience\
    """)

    # NOTE(review): the formatted messages are built but discarded (no return/print).
    chat_prompt.format_prompt(conversation=[human_message, ai_message], word_count="10").to_messages()

import time  # 用于计时
import asyncio  # 用于处理异步编程

def generate_serially():
    """Call the model twice, one request after another (serial baseline)."""
    # NOTE: runs 2 iterations, not 10 — the original comment was out of date.
    for _ in range(2):
        reply = llm.invoke([HumanMessage(content="Hello, how are you?")])
        print(reply.content)

# 定义一个异步生成文本的函数
async def async_generate(llm):
    print("Coroutine name:", asyncio.current_task().get_name())
    messages = [HumanMessage(content="Hello, how are you?")]
    resp = await llm.invoke(messages) # 异步调用agenerate方法生成文本
    print(resp.content)  # 打印生成的文本

# 定义一个并发（异步）方式生成文本的函数
async def generate_concurrently():
    tasks = [async_generate(llm) for _ in range(10)]  # 创建10个异步任务
    await asyncio.gather(*tasks)  # 使用asyncio.gather等待所有异步任务完成

def test13():
    """Time how long 10 concurrent chat requests take end to end."""
    messages = [HumanMessage(content="给我讲个诗词")]  # kept from the original; unused here

    start = time.perf_counter()
    # Outside Jupyter, asyncio.run drives the event loop directly; inside a
    # notebook an already-running loop would require `await` instead.
    asyncio.run(generate_concurrently())
    elapsed = time.perf_counter() - start
    # Bold ANSI escape around the timing line.
    print("\033[1m" + f"Concurrent executed in {elapsed:0.2f} seconds." + "\033[0m")


def test14():
    """Stream a chat answer and log every chunk as it arrives."""
    question = [HumanMessage(content="10岁以内的小朋友适合哪种语言入手做开发")]
    for part in llm.stream(question):
        log.info(part.content)
        print("---------------------")

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

def test15():
    """Stream tokens straight to stdout via a streaming callback handler."""
    # Local model instance (shadows the module-level `llm`) configured to
    # stream and to echo tokens through the stdout callback.
    streaming_llm = ChatGoogleGenerativeAI(
        model="models/gemini-1.5-pro-latest",
        temperature=0.7,
        streaming=True,
        callbacks=[StreamingStdOutCallbackHandler()],
    )
    song = streaming_llm.invoke("Write me a song about sparkling water.")
    log.info(song)

def test16():
    """Minimal LCEL pipeline: one templated human message through the model."""
    chat_prompt = ChatPromptTemplate.from_messages(
        [HumanMessagePromptTemplate.from_template("{text}")]
    )
    pipeline = chat_prompt | llm | StrOutputParser()
    log.info(pipeline.invoke({"text": "什么是人工智能？"}))

from langchain_elasticsearch import ElasticsearchStore
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
def test17():
    """Split a local text file into chunks and index them into Elasticsearch."""
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

    documents = TextLoader("F:/tmp/demo/467200248386300.txt").load()
    # NOTE(review): this instance is constructed but the indexing below goes
    # through the `from_documents` classmethod, which opens its own connection;
    # kept as-is to preserve the original behavior.
    elastic_vector_search = ElasticsearchStore(
        es_url="",
        index_name="test_index_emb",
        embedding=embeddings,
        es_user="elastic",
        es_password=""
    )

    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
    chunks = splitter.split_documents(documents)
    elastic_vector_search.from_documents(
        chunks,
        index_name="test_index_emb",
        es_url="",
        embedding=embeddings,
        es_user="elastic",
        es_password="",
    )
    print(chunks)

def test17Search():
    """Run a top-1 similarity search against an existing Elasticsearch index."""
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    store = ElasticsearchStore(
        es_url="",
        index_name="doc-common-wikipedia-concat-title-1800-200-bge-m3",
        embedding=embeddings,
        es_user="elastic",
        es_password="",
        vector_query_field="demo",
    )
    query = 'Title'
    hits = store.similarity_search(query, k=1)
    print(hits)


from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.tools.render import format_tool_to_openai_function
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.agents import tool


@tool
def get_word_length(word: str) -> int:
    """Returns the length of a word."""
    # The docstring above doubles as the tool description sent to the model
    # by the @tool decorator, so it is deliberately short and declarative.
    return len(word)



def test18():
    """Hand-rolled agent loop: bind a tool, then drive the model until it finishes."""
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are very powerful assistant, but bad at calculating lengths of words.",
            ),
            ("user", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    tools = [get_word_length]
    llm_with_tools = llm.bind(functions=[convert_to_openai_function(t) for t in tools])
    # Agent pipeline: input mapping -> prompt -> tool-aware model -> action parser.
    agent = (
            {
                "input": lambda x: x["input"],
                "agent_scratchpad": lambda x: format_to_openai_function_messages(
                    x["intermediate_steps"]
                ),
            }
            | prompt
            | llm_with_tools
            | OpenAIFunctionsAgentOutputParser()
    )
    # Single step with an empty scratchpad, just to show the raw agent output.
    first_step = agent.invoke({"input": "how many letters in the word educa?", "intermediate_steps": []})
    print(first_step)

    user_input = "how many letters in the word educa?"
    intermediate_steps = []
    # Manual agent loop: re-invoke with accumulated (action, observation)
    # pairs until the parser yields an AgentFinish.
    while True:
        step = agent.invoke(
            {
                "input": user_input,
                "intermediate_steps": intermediate_steps,
            }
        )
        if isinstance(step, AgentFinish):
            final_result = step.return_values["output"]
            break
        print(f"TOOL NAME: {step.tool}")
        print(f"TOOL INPUT: {step.tool_input}")
        # Renamed local (was `tool`) so it no longer shadows the imported
        # @tool decorator; behavior is unchanged.
        chosen_tool = {"get_word_length": get_word_length}[step.tool]
        observation = chosen_tool.run(step.tool_input)
        intermediate_steps.append((step, observation))
    print(final_result)

from langchain_core.agents import AgentFinish



if __name__ == "__main__":
    # Script entry point: only the agent demo runs by default.
    test18()
