from typing import Any, Optional, Type, Iterator
import requests

# Tool wrapper objects
from langchain.tools import BaseTool
from pydantic import Field, BaseModel
import json

from langchain.llms.base import LLM
import zai


# Input schema for FessTool: a single search keyword extracted from the
# user's question. (No class docstring on purpose — pydantic would surface
# it as the schema description shown to the agent.)
class FessToolParam(BaseModel):
    query: str = Field(description="查询关键词")  # the search keyword


class FessTool(BaseTool):
    # LangChain tool that queries a local Fess search server for knowledge
    # relevant to the user's question.
    name: str = "知识搜索"
    description: str = "通过将问题提取为一个关键词搜索，注意：只可以是一个词，可能了解到问题答案的相关信息"
    args_schema: Type[BaseModel] = FessToolParam

    def _run(self, query: str) -> Any:
        """Search the local Fess index for *query* and return the best hit.

        Returns:
            The full file contents for local ``txt`` hits, the Fess digest
            for any other file type, or a Chinese fallback message when the
            index has no matching record.
        """
        url = 'http://localhost:8080/api/v1/documents'
        params = {
            'q': query,
            'wt': 'json',
            'indent': 'true',
            'rows': 1
        }
        # Bound the request so a hung Fess server cannot block the agent
        # forever; fail loudly on non-2xx instead of a confusing KeyError.
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        # Parse the response body once instead of re-parsing it per field.
        payload = response.json()

        if payload["record_count"] > 0:
            doc = payload["data"][0]
            filetype = doc["filetype"]
            url_link: str = doc["url_link"]
            digest = doc["digest"]
            if filetype == 'txt':
                # Plain-text hits are read from disk via their file:// URL
                # so the agent sees the full document, not just the digest.
                with open(url_link.replace("file://", ""), encoding='utf-8') as f:
                    return f.read()
            return digest
        return "没有搜索到相关的知识，你可以根据你知道的回答。"


from zai import ZhipuAiClient
from langchain_core.outputs import GenerationChunk
from langchain.llms.base import LLM


class GLM_LLMS(LLM):
    """LangChain LLM wrapper around ZhipuAI's chat-completion API."""

    # SECURITY(review): API key hard-coded in source — move it to an
    # environment variable / secret store and rotate this key.
    client: ZhipuAiClient = ZhipuAiClient(api_key="d0fc2026b50344b18e25187d9393ce3f.P2XsXy1lpeqc2Gl0")
    # NOTE: attribute name is a typo for "local_model"; kept unchanged so
    # existing callers that reference `loacl_model` keep working.
    loacl_model: str = "GLM-4-Flash-250414"

    # The previous no-arg __init__ only called super() and blocked pydantic's
    # keyword construction; removing it is backward-compatible (GLM_LLMS()
    # still works) and lets field overrides be passed if ever needed.

    @property
    def _llm_type(self) -> str:
        """Type identifier LangChain reports for this LLM."""
        return "GLM_LLMS"

    def _call(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> str:
        """Send *prompt* as a single user turn and return the full reply.

        NOTE(review): `stop` is accepted per the LLM contract but not
        forwarded to the API — confirm whether stop sequences are needed.
        """
        messages = [{"role": "user", "content": prompt}]
        response = self.client.chat.completions.create(
            model=self.loacl_model,
            messages=messages,
        )
        return response.choices[0].message.content

    def _stream(
            self,
            prompt: str,
            stop: Optional[list[str]] = None,
            run_manager=None,
            **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Run the agent first, then stream the final answer token by token.

        The agent's output is injected as a system message so the model can
        ground its streamed reply in the tool results.
        """
        agent_output = get_agent().invoke(prompt).get("output")
        messages = [
            {"role": "system", "content": f"你可以参考代理返回的数据进行最后的回答。 代理返回如下:{agent_output}"},
            {"role": "user", "content": prompt}
        ]
        # Request the final answer as a server-side stream.
        response = self.client.chat.completions.create(
            model=self.loacl_model,
            messages=messages,
            stream=True
        )
        for chunk in response:
            # Skip empty keep-alive deltas; yield each token as it arrives.
            delta = chunk.choices[0].delta.content
            if delta:
                yield GenerationChunk(text=delta)


from langchain.agents import AgentType, initialize_agent
from langchain_core.tools import tool
from langchain.tools import BaseTool
from pydantic import BaseModel
import subprocess
import platform
import webbrowser


# Input schema for the start_application tool. (No class docstring on
# purpose — pydantic would surface it as the schema description.)
class start_application_param(BaseModel):
    command: str = Field(description="命令行命令")  # command-line command (currently unused by the tool body)


# Custom tool: open the user's "study kit" (course pages + chat apps).
@tool(description="用户需要打开自己的学习套装、套餐等相近的意思，请执行这个方法", args_schema=start_application_param)
def start_application(command: str) -> str:
    """Open the study-kit web pages and launch Weixin and Feishu.

    Args:
        command: Required by the tool schema but currently unused — the set
            of URLs and applications to open is fixed.

    Returns:
        A human-readable (Chinese) status string describing the outcome.
    """
    try:
        # Hide the console window that "start" would otherwise flash
        # (Windows only; harmless None elsewhere).
        startupinfo = None
        if platform.system() == 'Windows':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW

        # Open the three study resources in the default browser.
        study_urls = (
            r"https://study.163.com/course/courseMain.htm?courseId=1213610807&_trace_c_p_k2_=34deaef12bba434ca9c94e30d9bbb5cf",
            r"https://vcnlx426ierw.feishu.cn/wiki/E4jOwftyNigvROkSKdRcrUAbnag?fromScene=spaceOverview",
            r"https://chatglm.cn/main/alltoolsdetail?lang=zh",
        )
        for url in study_urls:
            webbrowser.open(url)

        # Launch the chat apps via the Windows "start" shell builtin; a
        # short timeout keeps the agent from blocking on a slow launch.
        for app in ("Weixin", "Feishu"):
            subprocess.run(["start", app],
                           shell=True, check=True, timeout=5, startupinfo=startupinfo)
        return "已成功启动应用程序网易云课堂，微信，飞书，打开学习套装任务完成"
    except subprocess.TimeoutExpired:
        # The app may still be starting — report soft success.
        return "应用程序 已启动（命令超时，但应用可能正在运行）"
    except subprocess.CalledProcessError as e:
        return f"启动失败: {str(e)}"


def get_agent():
    """Assemble the structured-chat agent wired to the search and launcher tools."""
    toolbox = [FessTool(), start_application]
    model = GLM_LLMS()
    return initialize_agent(
        tools=toolbox,
        llm=model,
        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        handle_parsing_errors=False,
        verbose=True,
        # Expose intermediate_steps so callers can inspect the reasoning trace.
        return_intermediate_steps=True,
    )


if __name__ == '__main__':
    # Smoke-test the agent: ask it to open the study kit and dump the result.
    agent = get_agent()
    result = agent.invoke("打开我的学习套装")
    banner = "+++++++++++++++++++"
    print(banner)
    print(result)
    print(banner)
