import os
from dotenv import load_dotenv
from langchain_ollama import ChatOllama

def api_test():
    """Smoke-test the DeepSeek chat API through LangChain and print the reply."""
    from langchain.chat_models import init_chat_model

    # Load DEEPSEEK_API_KEY (and friends) from .env; the langchain-deepseek
    # integration reads the key from the environment on its own, so there is
    # no need to fetch it into a local variable here.
    load_dotenv(override=True)

    # Equivalent raw-SDK call, kept for reference:
    # client = OpenAI(api_key=os.getenv("DEEPSEEK_API_KEY"), base_url="https://api.deepseek.com")
    # response = client.chat.completions.create(
    #                 model="deepseek-chat",
    #                 messages=[
    #                     {"role": "system", "content": "你是一个智能助手，帮助用户回答问题。"},
    #                     {"role": "user", "content": "请介绍一下你自己"}
    #                 ]
    # )
    # print(response.choices[0].message.content)

    # `model` names the model to use; `model_provider` picks the integration.
    # Provider "deepseek" auto-loads the langchain-deepseek package and talks
    # to that API with the given model name.
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    question = "你好，请你介绍下你自己"
    result = model.invoke(question)
    print(result.content)

def ollama_test():
    """Ask a locally served Ollama model to introduce itself and print the reply."""
    chat_model = ChatOllama(model="deepseek-r1:14b")
    reply = chat_model.invoke("你好，请你介绍下你自己")
    print(reply.content)

def build_chain1():
    """Build the simplest LCEL chain: chat model piped into a string parser."""
    from langchain_core.output_parsers import StrOutputParser

    model = ChatOllama(model="qwq:32b")
    # Model + output parser composed directly with the pipe operator.
    # (Removed an unused local import of init_chat_model.)
    basic_qa_chain = model | StrOutputParser()
    question = "你好，请你介绍下你自己"
    result = basic_qa_chain.invoke(question)
    print(result)

def build_chain2():
    """Chain a prompt template, a local model and a boolean output parser."""
    from langchain.output_parsers.boolean import BooleanOutputParser
    from langchain.prompts import ChatPromptTemplate

    model = ChatOllama(model="deepseek-r1:7b")
    # The prompt template constrains the model to a yes/no answer, which
    # BooleanOutputParser then converts into a Python bool.
    template = ChatPromptTemplate([
        ("system", "你是一个智能助手，帮助用户回答问题。"),
        ("user", "这是用户的问题: {topic}, 请用 yes 或 no 回答")
    ])

    bool_qa_chain = template | model | BooleanOutputParser()
    answer = bool_qa_chain.invoke("1 + 1 是否 大于 2？")
    print(answer)

# Use StructuredOutputParser to pull specified structured fields out of text.
def build_chain3():
    """Extract name/age fields from a free-text sentence as structured JSON."""
    from langchain.output_parsers import ResponseSchema, StructuredOutputParser
    from langchain_core.prompts import PromptTemplate

    model = ChatOllama(model="deepseek-r1:14b")

    response_schemas = [
        ResponseSchema(name="name", description="用户的姓名"),
        ResponseSchema(name="age", description="用户的年龄"),
    ]
    parser = StructuredOutputParser.from_response_schemas(response_schemas)
    format_instructions = parser.get_format_instructions()
    print(format_instructions)

    # Two placeholders in the template:
    # {input} is filled at invoke time with the user's text, while
    # {format_instructions} is bound up-front via .partial() below.
    prompt = PromptTemplate.from_template("请根据以下内容提取用户信息，并返回json格式: \n{input}\n\n{format_instructions}")

    extraction_chain = prompt.partial(format_instructions=format_instructions) | model | parser
    extracted = extraction_chain.invoke({"input": "用户叫李烈，今年24岁， 是一个工程师。"})
    print(extracted)

# Build a composite chain by running two sub-chains in sequence.
def build_chain4():
    """Generate a news article from a title, then extract key fields from it."""
    from langchain.output_parsers import ResponseSchema, StructuredOutputParser
    from langchain_core.prompts import PromptTemplate
    from langchain_core.runnables import RunnableSequence

    model = ChatOllama(model="deepseek-r1:14b")

    # Step 1: write a short news body from the given headline.
    news_gen_prompt = PromptTemplate.from_template("请根据以下新闻标题撰写一段简短的新闻内容(100字以内): \n\n标题: {title}")
    news_chain = news_gen_prompt | model

    # Step 2: pull structured fields (time / location / event) out of the body.
    field_schemas = [
        ResponseSchema(name="time", description="事件发生的时间"),
        ResponseSchema(name="location", description="事件发生的地点"),
        ResponseSchema(name="event", description="发生的具体事件"),
    ]
    parser = StructuredOutputParser.from_response_schemas(field_schemas)
    extract_prompt = PromptTemplate.from_template(
        "请根据以下这段新闻内容提取关键信息，并返回json格式: \n{news}\n\n{format_instructions}")

    summary_chain = (
        extract_prompt.partial(format_instructions=parser.get_format_instructions())
        | model
        | parser
    )

    # Piping the two sub-chains yields a single composite chain.
    full_chain = news_chain | summary_chain
    result = full_chain.invoke({"title": "深度求索公司在2025年年初发布了deepseek新模型"})
    print(result)

    # To peek at intermediate output, wrap a debug function in RunnableLambda
    # and splice it between the two sub-chains:
    # from langchain_core.runnables import RunnableLambda
    #
    # def debug_print(x):
    #     print("intermediate result (news body):", x)
    #     return x
    # debug_node = RunnableLambda(debug_print)
    # full_chain = news_chain | debug_node | summary_chain
    # result = full_chain.invoke({"title": "深度求索公司在2025年年初发布了deepseek新模型"})
    # print(result)

# Add multi-turn conversation memory.
def build_chain5():
    """Hand-build a message history so the model remembers earlier turns."""
    # In LangChain, multi-turn memory can be provided by manually assembling
    # the message list passed to each model call.
    from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

    model = ChatOllama(model="deepseek-r1:14b")

    # MessagesPlaceholder splices the running conversation into the prompt.
    chatbot_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content="你叫悠悠，是一名乐于助人的助手。"),
        MessagesPlaceholder(variable_name="messages"),
    ])
    qa_chain = chatbot_prompt | model | StrOutputParser()

    history = [
        HumanMessage(content="你好，我叫小真，下午好。"),
        AIMessage(content="你好呀，我叫悠悠，是一名乐于助人的AI助手。很高兴认识你！"),
    ]
    history.append(HumanMessage(content="你好，请问我叫什么名字？"))
    print(history)
    answer = qa_chain.invoke({"messages": history})
    print(answer)

# Streaming chat output: LangChain's astream yields chunks as soon as the
# model produces them.
def build_chain6():
    """Assemble a prompt → model → parser chain ready for async streaming."""
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate

    model = ChatOllama(model="deepseek-r1:14b")
    parser = StrOutputParser()

    prompt = ChatPromptTemplate([
        ("system", "你叫悠悠，是一个智能助手。"),
        ("user", "{input}")
    ])

    chain = prompt | model | parser
    # Streaming usage (needs an async context, e.g. asyncio.run):
    # async for chunk in chain.astream({"input": "你好,请你介绍下你自己"}):
    #     print(chunk, end="", flush=True)

# A chatbot with memory that supports multi-turn conversations.
def chatbot():
    """Launch a Gradio web chatbot backed by the DeepSeek chat model.

    History is kept by the Gradio Chatbot component as (user, ai) tuples and
    replayed into the prompt's MessagesPlaceholder on every turn, so the
    model keeps multi-turn context. Replies are streamed token by token.
    """
    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
    from langchain_core.output_parsers import StrOutputParser
    import gradio as gr
    from langchain.chat_models import init_chat_model

    # A local Ollama model also works:
    # model = ChatOllama(model="deepseek-r1:14b",
    #                    base_url="http://localhost:11434"  # must point at the Ollama API
    #                    )
    # DEEPSEEK_API_KEY is read from the environment by the integration.
    load_dotenv(override=True)
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")

    prompt = ChatPromptTemplate([
        ("system", "你叫悠悠，是一个智能助手。"),
        MessagesPlaceholder(variable_name="chat_history"),  # prior turns
        ("human", "{input}")
    ])
    chain = prompt | model | StrOutputParser()

    async def chat_response(message, history):
        """Stream the AI reply to `message`, given prior (user, ai) pairs."""
        # Gradio's tuple history must be converted to LangChain messages.
        from langchain_core.messages import HumanMessage, AIMessage
        formatted_history = []
        for user_msg, ai_msg in history:
            if user_msg:
                formatted_history.append(HumanMessage(content=user_msg))
            if ai_msg:
                formatted_history.append(AIMessage(content=ai_msg))

        partial_message = ""
        async for chunk in chain.astream({
                "input": message,
                "chat_history": formatted_history}):
            partial_message += chunk
            yield partial_message

    # Build the Gradio UI.
    def create_chatbot():
        # Custom CSS: center the main column and the header.
        css = """
        .main-container {
            max-width: 1200px;
            margin: 0 auto;
            padding: 20px;
        }

        .header-text {
            text-align: center;
            margin-bottom: 20px;
        }
        """
        with gr.Blocks(title="Deepseek Chat", css=css) as demo:
            with gr.Column(elem_classes=["main-container"]):
                # Centered title. (Fixed: "#Heading" without a space after the
                # hash is not rendered as a Markdown header.)
                gr.Markdown(
                    "# LangChain入门",
                    elem_classes=["header-text"]
                )
                chatbot = gr.Chatbot(
                    height=500,
                    show_copy_button=True,
                    avatar_images=(
                        "https://cdnjs.cloudflare.com/ajax/libs/twemoji/14.0.2/72x72/1f600.png",  # 😄 user
                        "https://cdnjs.cloudflare.com/ajax/libs/twemoji/14.0.2/72x72/1f916.png"   # 🤖 bot
                    ),
                    # type="messages"
                )

                with gr.Row():
                    msg = gr.Textbox(
                        placeholder="请输入您的问题...",
                        container=False,
                        scale=7
                    )
                    submit = gr.Button("发送", scale=1, variant="primary")
                    clear = gr.Button("清空", scale=1)

            # Handle an outgoing message.
            async def respond(message, chat_history):
                if not message.strip():
                    yield "", chat_history
                    return

                # Snapshot the history *before* appending the new message:
                # the current question is passed separately as `input`, so
                # including it in `chat_history` too would make the model see
                # it twice (this was a bug in the original version).
                prior_history = chat_history

                # 1. Append the user message and show it immediately.
                chat_history = chat_history + [(message, None)]
                yield "", chat_history

                # 2. Stream the AI reply into the last history slot.
                async for response in chat_response(message, prior_history):
                    chat_history[-1] = (message, response)
                    yield "", chat_history

            # Clear the conversation.
            def clear_history():
                return [], ""

            # Wire events.
            msg.submit(respond, [msg, chatbot], [msg, chatbot])
            submit.click(respond, [msg, chatbot], [msg, chatbot])
            clear.click(clear_history, outputs=[chatbot, msg])

        return demo

    demo = create_chatbot()
    demo.launch(server_name="127.0.0.1",
                server_port=7860,
                share=False,
                debug=True)

# Use the Python interpreter tool for data analysis.
def use_tools1():
    """Have the model write pandas code for a question, then execute it.

    Pipeline: prompt -> model (emits a tool call) -> parse the {'query': ...}
    arguments -> log the generated code -> run it in a Python REPL tool that
    has the DataFrame `df` pre-bound.
    """
    load_dotenv(override=True)
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_experimental.tools import PythonAstREPLTool
    import pandas as pd
    df = pd.read_csv("./Telco/WA_Fn-UseC_-Telco-Customer-Churn.csv")
    # The tool executes Python with `df` available in its local namespace.
    tool = PythonAstREPLTool(locals={"df": df})
    # print(tool.invoke("df['SeniorCitizen'].mean()"))  # -> 0.1621468124378816

    from langchain.chat_models import init_chat_model
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    llm_with_tools = model.bind_tools([tool])
    # A raw invoke returns the tool call inside additional_kwargs, e.g.
    # {'tool_calls': [{'id': ..., 'function': {'arguments':
    #   '{"query":"df[\'SeniorCitizen\'].mean()"}', 'name': 'python_repl_ast'},
    # ...]} — it still needs parsing before the code can be run.

    from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
    # Extract just the arguments dict of the first python_repl_ast tool call.
    parser = JsonOutputKeyToolsParser(key_name=tool.name, first_tool_only=True)
    # llm_chain = llm_with_tools | parser
    # llm_chain.invoke("我有一张表，名为'df'。你帮我计算SeniorCitizen字段的均值")
    # -> {"query": "df['SeniorCitizen'].mean()"}

    # NOTE: no f-string prefix — the text contains no placeholders (and a
    # stray `f` would break if literal braces were ever added).
    system_message = """你可以访问一个名为'df'的pandas数据框，你也可以使用df.head().to_markdown()查看数据集的基本信息。
    请根据用户提出的问题，编写python代码来回答。只返回代码，不返回其他内容。只允许使用pandas和内置库。"""
    prompt = ChatPromptTemplate([("system", system_message), ("user", "{question}")])

    # code_chain = prompt | llm_with_tools | parser
    # print(code_chain.invoke({"question": "请帮我计算SeniorCitizen字段的均值"}))

    def code_debug(res):
        # Show the generated code just before the REPL tool runs it.
        print("即将运行python代码：", res['query'])
        return res
    from langchain_core.runnables import RunnableLambda
    print_node = RunnableLambda(code_debug)
    print_code_chain = prompt | llm_with_tools | parser | print_node | tool
    print(print_code_chain.invoke({"question": "帮我计算SeniorCitizen字段的均值"}))
    # Example output:
    #   即将运行python代码： df['SeniorCitizen'].mean()
    #   0.1621468124378816



# Wire in a custom tool — the more verbose, manual implementation.
def use_tools2():
    """Query live weather through a custom tool, then have the model narrate it."""
    load_dotenv(override=True)
    import requests, json
    from langchain_core.tools import tool
    from langchain_core.prompts import ChatPromptTemplate

    @tool
    def get_weather(loc):
        # NOTE: this docstring doubles as the tool description that is sent
        # to the model, so it is kept verbatim.
        """
        查询即时天气函数
        :param loc: 必要参数，字符串类型，用于表示查询天气的具体城市名称，\
        注意，中国的城市需要用对应城市的英文名称代替
        :return: 解析之后的json格式对象
        """
        response = requests.get(
            "https://api.openweathermap.org/data/2.5/weather",
            params={
                "q": loc,
                "appid": os.getenv("OPENWEATHER_API_KEY"),
                "units": "metric",
                "lang": "zh_cn",
            },
        )
        return json.dumps(response.json())

    # print(get_weather.name)
    from langchain.chat_models import init_chat_model
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    llm_with_tools = model.bind_tools([get_weather])

    # response = llm_with_tools.invoke("你好，上海今天的天气怎么样？")
    # print(response)

    from langchain_core.output_parsers.openai_tools import JsonOutputKeyToolsParser
    # Pull the arguments of the first get_weather call and feed them straight
    # back into the tool itself.
    weather_parser = JsonOutputKeyToolsParser(key_name="get_weather", first_tool_only=True)
    ask_prompt = ChatPromptTemplate([("user", "{input}")])
    get_weather_chain = ask_prompt | llm_with_tools | weather_parser | get_weather
    # get_weather_chain.invoke("你好，上海今天的天气怎么样？") returns the raw
    # OpenWeather JSON payload for the city.

    from langchain.prompts import PromptTemplate
    from langchain_core.output_parsers import StrOutputParser
    output_prompt = PromptTemplate.from_template(
        """你将收到一段json格式的天气数据，请用简洁自然的方式将其转述给用户。
        以下是天气json数据：json {weather_json}
        请将其转换为中文天气描述，例如：
        ”北京当前天气晴朗，气温为23C，湿度58%，风速2.1米/秒" 只返回一句话描述，不要其他说明或解释。
        """
    )
    output_chain = output_prompt | model | StrOutputParser()
    # output_chain turns the raw weather JSON into a one-sentence Chinese
    # description.

    # Raw JSON in, natural-language description out.
    full_chain = get_weather_chain | output_chain
    response = full_chain.invoke("请问天津今天的天气如何？")
    print(response)  # e.g. 天津当前阴天多云，气温31.72°C（体感38.72°C），湿度77%，风速1.45米/秒。

# Wire in a custom tool via an agent — a much simpler way to get serial and
# parallel tool execution.
def use_tools3():
    """Answer weather questions with a tool-calling agent."""
    from langchain.agents import create_tool_calling_agent, tool
    from langchain_core.prompts import ChatPromptTemplate
    from langchain.chat_models import init_chat_model
    load_dotenv(override=True)
    import requests, json

    @tool
    def get_weather(loc):
        # NOTE: this docstring is also the tool description the model sees,
        # so it is kept verbatim.
        """
        查询即时天气函数
        :param loc: 必要参数，字符串类型，用于表示查询天气的具体城市名称，\
        注意，中国的城市需要用对应城市的英文名称代替
        :return: 解析之后的json格式对象
        """
        resp = requests.get(
            "https://api.openweathermap.org/data/2.5/weather",
            params={
                "q": loc,
                "appid": os.getenv("OPENWEATHER_API_KEY"),
                "units": "metric",
                "lang": "zh_cn",
            },
        )
        return json.dumps(resp.json())

    available_tools = [get_weather]
    agent_prompt = ChatPromptTemplate.from_messages([
        ("system", "你是天气助手，请根据用户的问题，给出相应的天气信息"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),  # slot for intermediate tool steps
    ])
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    agent = create_tool_calling_agent(model, available_tools, agent_prompt)
    from langchain.agents import AgentExecutor
    agent_executor = AgentExecutor(agent=agent, tools=available_tools, verbose=True)
    # A single-city question runs one tool call (serial):
    # agent_executor.invoke({"input": "请问今天北京的天气怎么样"})

    # Asking about two cities makes the agent issue the tool calls in parallel.
    response = agent_executor.invoke({"input": "请问今天北京和上海的天气怎么样？哪个城市更热？"})
    print(response)
    # e.g. 'output': '今天北京的天气是阴天……上海比北京稍微热一点。'

# Create an agent around a LangChain built-in tool — Tavily web search.
def use_tools4():
    """Answer a question that needs live web search via a Tavily-powered agent."""
    load_dotenv(override=True)
    from langchain_community.tools.tavily_search import TavilySearchResults
    from langchain.agents import create_tool_calling_agent, tool
    from langchain_core.prompts import ChatPromptTemplate
    from langchain.chat_models import init_chat_model
    from langchain.agents import AgentExecutor

    search_tool = TavilySearchResults(max_results=2)
    # search_tool.invoke("苹果2025WWDC发布会") returns a list of
    # {'title': ..., 'url': ..., 'content': ...} result dicts.

    prompt = ChatPromptTemplate.from_messages([
        ("system", "你是一名乐于助人的助手，并且可以调用工具进行网络搜索，获取实时信息"),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),  # slot for intermediate tool steps
    ])
    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    agent = create_tool_calling_agent(model, [search_tool], prompt)
    executor = AgentExecutor(agent=agent, tools=[search_tool], verbose=True)
    answer = executor.invoke({"input": "请问将在2026年举行世界杯的城市是？"})
    print(answer)  # e.g. 'output': '2026年世界杯将由美国、墨西哥和加拿大联合举办……'


def main():
    """Entry point: uncomment the demo you want to run (one at a time)."""
    # api_test()
    # ollama_test()
    # build_chain3()
    # build_chain4()
    # build_chain5()
    # chatbot()
    # use_tools2()
    # use_tools3()
    use_tools4()

# Run this file directly (e.g. the IDE's run button) to execute main().
if __name__ == "__main__":
    main()
