from typing import Any, Optional, Type, Iterator
import requests

# Tool wrapper definitions (LangChain BaseTool subclasses and helpers)
from langchain.tools import BaseTool
from pydantic import Field, BaseModel
import json

from langchain.llms.base import LLM
import zai


# Argument schema for FessTool: the agent must supply exactly one search
# keyword. (No class docstring on purpose — pydantic would surface it as the
# schema description sent to the LLM.)
class FessToolParam(BaseModel):
    # Single query keyword; the Field description is shown to the LLM.
    query: str = Field(description="查询关键词")


class FessTool(BaseTool):
    """Knowledge-search tool backed by a local Fess search server.

    The agent supplies a single keyword; the tool queries the Fess REST API
    (``/api/v1/documents``) and returns either the full text of a matching
    local ``txt`` document or the digest of the best hit.
    """

    # name/description are consumed by the LLM when choosing tools;
    # the wording is part of the prompt contract — keep it stable.
    name: str = "知识搜索"
    description: str = "通过将问题提取为一个关键词搜索，注意：只可以是一个词，可能了解到问题答案的相关信息"
    args_schema: Type[BaseModel] = FessToolParam

    def _run(self, query: str) -> Any:
        """Query the local Fess server and return the best-matching content.

        Args:
            query: A single search keyword.

        Returns:
            The file content for a local ``txt`` hit, the digest string for
            any other filetype, or a fallback message when nothing matches.
        """
        url = 'http://localhost:8080/api/v1/documents'
        params = {
            'q': query,
            'wt': 'json',
            'indent': 'true',
            'rows': 1
        }
        # Timeout added so a hung search server cannot stall the agent forever.
        response = requests.get(url, params=params, timeout=10)
        # Parse the JSON body once instead of re-parsing it for every field.
        payload = response.json()

        if payload["record_count"] > 0:
            hit = payload["data"][0]
            filetype = hit["filetype"]
            url_link: str = hit["url_link"]
            digest = hit["digest"]
            if filetype == 'txt':
                # Fess reports local documents as file:// URLs; strip the
                # scheme and read the file directly for the full text.
                with open(url_link.replace(r"file://", ""), encoding='utf-8') as f:
                    return f.read()
            return digest
        else:
            return "没有搜索到相关的知识，你可以根据你知道的回答。"


from zai import ZhipuAiClient
from langchain_core.outputs import GenerationChunk
from langchain.llms.base import LLM


class GLM_LLMS(LLM):
    """LangChain LLM wrapper around the ZhipuAI GLM chat-completion API.

    ``_call`` does a plain one-shot completion; ``_stream`` first runs the
    tool-using agent and then streams a final answer grounded on the agent's
    output.
    """

    # SECURITY: the API key is hardcoded in source. Move it to an environment
    # variable (e.g. ZHIPUAI_API_KEY) before committing or deploying.
    client: ZhipuAiClient = ZhipuAiClient(api_key="d0fc2026b50344b18e25187d9393ce3f.P2XsXy1lpeqc2Gl0")
    # NOTE(review): attribute name is misspelled ("loacl" for "local") but is
    # kept as-is for backward compatibility with any external references.
    loacl_model: str = "GLM-4-Flash-250414"

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses to label this LLM implementation."""
        return "GLM_LLMS"

    def _call(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> str:
        """Send ``prompt`` as a single user message and return the full reply.

        Args:
            prompt: The user prompt text.
            stop: Stop sequences (accepted for the LLM interface; not
                forwarded to the API here).
            run_manager: LangChain callback manager (unused).

        Returns:
            The assistant message content of the first completion choice.
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.client.chat.completions.create(
            model=self.loacl_model,
            messages=messages,
        )
        return response.choices[0].message.content

    def _stream(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Stream the final answer, grounded on the tool agent's output.

        Runs ``get_agent()`` on the prompt first, injects its output as a
        system message, then streams the model's final answer chunk by chunk.

        Yields:
            ``GenerationChunk`` objects with incremental answer text.
        """
        # Run the tool-using agent first; its result becomes the grounding
        # context for the final streamed answer.
        agent_output = get_agent().invoke(prompt).get("output")
        messages = [
            {"role": "system", "content": f"你可以参考代理返回的数据进行最后的回答。 代理返回如下:{agent_output}"},
            {"role": "user", "content": prompt}
        ]
        # Request the final answer as a token stream.
        response = self.client.chat.completions.create(
            model=self.loacl_model,
            messages=messages,
            stream=True
        )
        # Fix: the original accumulated chunks into an unused ``append_text``
        # variable; only the per-chunk yield is needed.
        for chunk in response:
            delta = chunk.choices[0].delta.content
            if delta:
                yield GenerationChunk(text=delta)


from langchain.agents import AgentType, initialize_agent, Tool
from langchain_experimental.utilities import PythonREPL

# Shared REPL instance backing the Tool below.
python_repl = PythonREPL()

# Tool wrapper: the agent passes a Python command string as action_input and
# gets back whatever the command prints.
repl_tool = Tool(
    name="Python_REPL",
    func=python_repl.run,
    description="Python代码解释器，输入应该为有效的python命令，要查看计算结果值的输出，应该使用print(...)将action_input, 例如 print(3 * 2 + 5 * 3)"
)

from pydantic import BaseModel
from langchain_core.tools import tool


# Argument schema shared by the python-REPL tools: a single code string.
# NOTE(review): class name is lower_snake_case (unconventional for a class)
# but is referenced by the @tool decorators below — renaming would break them.
class run_python_param(BaseModel):
    # Python source to execute; the Field description is shown to the LLM.
    code: str = Field(description="需要执行的代码")


@tool(description="Python代码解释器，输入应该为有效的python命令，如果想要查看某个值的输出，应该使用print(...)打印出来", args_schema=run_python_param)
def run_python_repl(code: str):
    """Execute ``code`` in a fresh PythonREPL and return its printed output.

    Fix: the original unconditionally wrapped the input in ``print(...)``,
    so input that already called ``print()`` (as the tool description tells
    the model to do) became ``print(print(...))`` and emitted a spurious
    ``None`` line. Only wrap when the input does not already print.
    """
    command = code.strip()
    if not command.startswith("print("):
        command = f"print({command})"
    python_repl = PythonREPL()
    return python_repl.run(command)


def run_python_repl_2(code: str):
    """Run ``code`` in a fresh PythonREPL and return its printed output.

    This is the function wired into the agent via ``repl_tool_2``.

    Fix: the original unconditionally wrapped the input in ``print(...)``,
    so an input that already called ``print()`` (as the tool description
    instructs) became ``print(print(...))`` and printed a spurious ``None``.
    Only wrap when the input does not already print.
    """
    command = code.strip()
    if not command.startswith("print("):
        command = f"print({command})"
    return PythonREPL().run(command)


@tool(description="Python代码解释器，输入应该为有效的python命令，如果想要查看某个值的输出，应该使用print(...)打印出来", args_schema=run_python_param)
def run_python(code: str):
    # SECURITY: eval() runs arbitrary LLM-generated code with full process
    # privileges — do not use on untrusted input; prefer a sandboxed REPL
    # such as run_python_repl above.
    # NOTE(review): eval only handles expressions, and eval("print(...)")
    # returns None even though the description tells the model to use
    # print(...) — confirm this tool is intentionally unused by get_agent().
    return eval(code)


# Tool wrapper around run_python_repl_2; this is the REPL tool actually
# registered with the agent in get_agent().
repl_tool_2 = Tool(
    name="Python_REPL_2",
    func=run_python_repl_2,
    description="Python代码解释器，输入应该为有效的python命令，要查看计算结果值的输出，应该使用print(...)将action_input, 例如 print(3 * 2 + 5 * 3)"
)


def get_agent():
    """Build a structured-chat ReAct agent wired to the search and REPL tools.

    Returns:
        An AgentExecutor using GLM_LLMS that can call the Fess knowledge
        search and the Python REPL tool.
    """
    toolset = [FessTool(), repl_tool_2]
    glm = GLM_LLMS()
    return initialize_agent(
        tools=toolset,
        llm=glm,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        handle_parsing_errors=False,
        verbose=True,
        # Expose intermediate_steps so callers can inspect the reasoning trace.
        return_intermediate_steps=True,
    )


if __name__ == '__main__':
    # Smoke test: a small arithmetic word problem that should route through
    # the Python REPL tool.
    executor = get_agent()
    result = executor.invoke("3个可乐，5个脉动， 可乐3元1个，脉动5元1个，可乐和脉动总共多少钱？")
    print("+++++++++++++++++++")
    print(result)
    print("+++++++++++++++++++")
