import json
import os
import warnings
from typing import Optional, Any, Iterator

from zai import ZhipuAiClient
from langchain.llms.base import LLM
from langchain_core.outputs import GenerationChunk
from tools.weather import WeatherTool, get_weather
from tools.wanianli import get_wanianli

from langchain.agents import Tool, AgentType, initialize_agent
from langchain.agents.agent import AgentExecutor

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import DirectoryLoader
from langchain.prompts import ChatPromptTemplate

from langchain_core.runnables import RunnablePassthrough, RunnableParallel

from pydantic import Field
from typing import List, Any

import yaml

# Legacy memory API (deprecated in newer LangChain releases)
from langchain.memory import ConversationBufferMemory
# Modern replacement (recommended)
from langchain_community.chat_message_histories import ChatMessageHistory

# Local filesystem path to the BAAI bge-large-zh-v1.5 embedding model weights.
embeddings_path = r'M:\moudels\BAAIbge-large-zh-v1.5'

# Build the retrieval pipeline at import time: load a local HuggingFace
# embedding model, index every document under the data directory into an
# in-memory FAISS store, and expose it as a retriever for the RAG chain below.
# Warnings are silenced here — presumably to hide model-loading deprecation
# noise; TODO confirm nothing important is being suppressed.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_path)
    load = DirectoryLoader(r'F:\A_wokecode\gradio_study\langchain_study\data')
    vs = FAISS.from_documents(load.load(), embeddings)
    retriever = vs.as_retriever()  # retriever over the FAISS index


def get_tools_json(tools_paths="./tools"):
    """Load the tool/function-calling schema from the first ``*.json`` file
    found in *tools_paths*.

    Args:
        tools_paths: Directory scanned for ``.json`` files.

    Returns:
        The parsed JSON content of the first ``.json`` file found, or
        ``None`` when the directory is missing or contains no ``.json`` file.
    """
    try:
        entries = os.listdir(tools_paths)
    except FileNotFoundError:
        # Missing tools directory: degrade gracefully instead of crashing at
        # import time (this function runs as a class-attribute default below).
        return None
    for name in entries:
        if name.endswith(".json"):
            with open(os.path.join(tools_paths, name), 'r', encoding='utf-8') as f:
                return json.load(f)
    return None


class GLM_4dot5(LLM):
    """LangChain LLM wrapper around ZhipuAI's glm-4.5-flash with tool calling.

    Supports blocking (``_call``) and streaming (``_stream``) generation.
    When the model requests tool calls, the matching local Python function is
    executed and its result is sent back for the model's final answer.
    """

    # NOTE(security): prefer supplying the key via the ZHIPU_API_KEY
    # environment variable; the hard-coded fallback preserves existing
    # behavior but this key is committed in source and should be rotated.
    client: ZhipuAiClient = ZhipuAiClient(
        api_key=os.getenv("ZHIPU_API_KEY", "d0fc2026b50344b18e25187d9393ce3f.P2XsXy1lpeqc2Gl0")
    )
    # Tool/function schemas loaded from ./tools/*.json (None when absent).
    tools: Any = get_tools_json()

    def execute_function(self, function_name: str, arguments: dict):
        """Dispatch a model-requested tool call to its local implementation.

        Args:
            function_name: Function name reported in the model's tool call.
            arguments: Parsed JSON arguments from the tool call.

        Returns:
            The tool's result, or an error dict for unknown function names.
        """
        if function_name == "get_weather":
            return get_weather(arguments.get("location", ""))
        elif function_name == "get_wanianli":
            return get_wanianli(arguments.get("year", ""), arguments.get("month", ""), arguments.get("day", ""))
        else:
            return {"error": f"未知函数: {function_name}"}

    # Required by the LangChain LLM base class.
    @property
    def _llm_type(self) -> str:
        return "GLM_4dot5"

    def _first_round(self, prompt: str):
        """Send the prompt with tool schemas attached (first round-trip).

        Returns:
            A ``(messages, message)`` pair: the growing conversation list
            (already containing the assistant reply) and the assistant's
            first response message.
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.client.chat.completions.create(
            model="glm-4.5-flash",
            messages=messages,
            tools=self.tools,
            tool_choice="auto"
        )
        message = response.choices[0].message
        messages.append(message.model_dump())
        return messages, message

    def _apply_tool_calls(self, messages: list, message) -> None:
        """Execute every requested tool call and append each result as a
        ``role="tool"`` message so the model can compose a final answer."""
        for tool_call in message.tool_calls:
            result = self.execute_function(
                tool_call.function.name,
                json.loads(tool_call.function.arguments),
            )
            messages.append({
                "role": "tool",
                "content": json.dumps(result, ensure_ascii=False),
                "tool_call_id": tool_call.id
            })

    def _call(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> str:
        """Blocking generation; resolves any tool calls before returning text.

        Note: ``stop`` and ``run_manager`` are accepted for LangChain
        compatibility but are not forwarded to the API.
        """
        messages, message = self._first_round(prompt)
        if not message.tool_calls:
            return message.content
        self._apply_tool_calls(messages, message)
        # Second round-trip: let the model answer using the tool results.
        final_response = self.client.chat.completions.create(
            model="glm-4.5-flash",
            messages=messages,
            tools=self.tools
        )
        return final_response.choices[0].message.content

    def _stream(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> Iterator:
        """Streaming generation yielding ``GenerationChunk`` objects.

        Tool calls are resolved synchronously first; only the final answer
        is streamed.
        """
        messages, message = self._first_round(prompt)
        if message.tool_calls:
            self._apply_tool_calls(messages, message)
            final_response = self.client.chat.completions.create(
                model="glm-4.5-flash",
                messages=messages,
                tools=self.tools,
                stream=True
            )
        else:
            final_response = self.client.chat.completions.create(
                model="glm-4.5-flash",
                messages=messages,
                stream=True
            )
        for chunk in final_response:
            delta = chunk.choices[0].delta.content
            if delta:
                yield GenerationChunk(text=delta)

if __name__ == '__main__':
    # Smoke test: ask a lunar-calendar question so the model must fall back
    # to its tools when the indexed documents lack the answer.
    llm = GLM_4dot5()
    # RAG prompt: answer strictly from the retrieved document when possible;
    # otherwise fall back to tools / model knowledge without revealing the
    # source of the answer.
    prompt = ChatPromptTemplate.from_messages([
        ("system",
         "从文档\n\"\"\"\n{content}\n\"\"\"\n中找问题\n\"\"\"\n{input}\n\"\"\"\n的答案，找到答案就仅使用文档语句回答问题，找不到答案就结合工具和自身知识回答。\n 不要告诉用户，最终回答是来自于文档或工具。\n 如果没办法回答就如实告知原因"),
        ("user", "{input}")
    ])
    # Fan the same user input out to the retriever (bound to {content}) and
    # straight through to the prompt (bound to {input}).
    rag_inputs = RunnableParallel({
        "content": retriever,
        "input": RunnablePassthrough()
    })
    chain = rag_inputs | prompt | llm
    for chat in chain.stream("今天是阴历几号？"):
        print(chat, end="", flush=True)
