{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "2a50fe47",
   "metadata": {},
   "source": [
    "# RapidAPI接口\n",
    "RapidAPI 是一个 API 市场，提供了数千个 API，可以帮助开发人员快速找到并使用需要的 API。它包括多个类别，如人工智能、云计算、区块链、金融、游戏等，每个类别下面都有大量的 API 接口可供选择。在 RapidAPI 的平台上，你可以搜索、筛选、订阅和使用这些 API 接口，还可以查看 API 的文档和使用示例，从而更好地了解和使用它们。\n",
    "我们利用Rapid API平台上bing搜索引擎提供的api来拿到浏览器上的最新数据，以此来作为大模型的新的数据来源。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a7505050",
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "\n",
    "RapidAPIKey = \"4747ae4f1msh26b54cefc0a56f5p1f8284jsn46704b3264a2\"\n",
    "\n",
    "class DeepSearch:\n",
    "    def search(query: str = \"\"):\n",
    "        query = query.strip()\n",
    "\n",
    "        #if query == \"\":\n",
    "        #    return \"\"\n",
    "\n",
    "        if RapidAPIKey == \"\":\n",
    "            return \"请配置你的 RapidAPIKey\"\n",
    "\n",
    "        \n",
    "        url = \"https://bing-web-search1.p.rapidapi.com/search\"\n",
    "\n",
    "        querystring = {\"q\":query,\"mkt\":\"en-us\",\"safeSearch\":\"Off\",\"textFormat\":\"Raw\",\"freshness\":\"Day\"}\n",
    "\n",
    "        headers = {\n",
    "        \"X-BingApis-SDK\": \"true\",\n",
    "        \"X-RapidAPI-Key\": \"84747ae4f1msh26b54cefc0a56f5p1f8284jsn46704b3264a2\",\n",
    "        \"X-RapidAPI-Host\": \"bing-web-search1.p.rapidapi.com\"\n",
    "        }\n",
    "\n",
    "        response = requests.get(url, headers=headers, params=querystring)\n",
    "\n",
    "        #print(response.json())\n",
    "        data_list = response.json()['value']\n",
    "\n",
    "        if len(data_list) == 0:\n",
    "            print(\"没查到\")\n",
    "            return \"\"\n",
    "        else:\n",
    "            result_arr = []\n",
    "            result_str = \"\"\n",
    "            count_index = 0\n",
    "            data_size=len(data_list)\n",
    "            if data_size >4:\n",
    "                for i in range(4):\n",
    "                    item = data_list[i]\n",
    "                    title = item[\"name\"]\n",
    "                    description = item[\"description\"]\n",
    "                    item_str = f\"{title}: {description}\"\n",
    "                    result_arr = result_arr + [item_str]\n",
    "            else:\n",
    "                for i in range(data_size):\n",
    "                    item = data_list[i]\n",
    "                    title = item[\"name\"]\n",
    "                    description = item[\"description\"]\n",
    "                    item_str = f\"{title}: {description}\"\n",
    "                    result_arr = result_arr + [item_str]\n",
    "            result_str = \"\\n\".join(result_arr)\n",
    "            print(\"查到了\")\n",
    "            return result_str\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e967f738",
   "metadata": {},
   "source": [
    "# CustomLLM\n",
    "   langchain 默认的模型是 OpenAI的ChatGPT。对于局域网应用来说，因为信息安全的要求私有数据不能出网关，所以需要搭本地模型。其实整个应用的硬件成本最高的就是 LLM 的部署，最经济的方式就是一个局域网一个类型的 LLM 统一部署一个，为了保障硬件的充分利用。LLM 和 Langchain 分开部署的最大好处就是灵活性，其实Langchain 已经是一个非常棒的设计样板了，langchain 只做资源整合，任何重存储和重计算的服务全部在远端部署，给 langchain 的应用留足生长的空间。\n",
    "   使用 LangChain 可以方便使用 OpenAI 的模型接口，但是由于各种限制，导致使用有所不便，那么我们可以考虑使用自定义的本地模型，其核心在于构建 langchain.llms.base.LLM 的子类 CustomLLM 并重写_call 函数如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4f42d2c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Any, List, Dict, Mapping, Optional\n",
    "import json\n",
    "\n",
    "from langchain.callbacks.manager import CallbackManagerForLLMRun\n",
    "import requests\n",
    "from langchain.llms.base import LLM\n",
    "from langchain.callbacks.base import Callbacks\n",
    "from transformers import AutoModel,AutoTokenizer\n",
    "\n",
    "\n",
    "class CustomLLM(LLM):\n",
    "    \"\"\"LLM wrapper that forwards prompts to a self-hosted model over HTTP.\"\"\"\n",
    "\n",
    "    logging: bool = False  # when True, log() prints debug messages\n",
    "    output_keys: List[str] = [\"output\"]\n",
    "\n",
    "    llm_type: str = \"Llama-2-7b\"\n",
    "\n",
    "    @property\n",
    "    def _llm_type(self) -> str:\n",
    "        return self.llm_type\n",
    "\n",
    "    def log(self, log_str):\n",
    "        \"\"\"Print `log_str` only when logging is enabled.\n",
    "\n",
    "        Bug fix: the previous version had an `if` branch whose only statement\n",
    "        was a comment — a SyntaxError that made this cell unrunnable.\n",
    "        \"\"\"\n",
    "        if self.logging:\n",
    "            print(log_str)\n",
    "\n",
    "    def _call(\n",
    "        self,\n",
    "        prompt: str,\n",
    "        stop: Optional[List[str]] = None,\n",
    "        run_manager: Optional[CallbackManagerForLLMRun] = None,\n",
    "    ) -> str:\n",
    "        \"\"\"POST the prompt to the model server and return its raw text reply.\"\"\"\n",
    "        # NOTE(review): the endpoint is a hardcoded LAN address — consider\n",
    "        # making it configurable (e.g. via an environment variable).\n",
    "        response = requests.post(f'http://10.1.36.75:8080/', {\n",
    "        \"ask\": prompt\n",
    "        })\n",
    "        return response.text\n",
    "\n",
    "    @property\n",
    "    def _identifying_params(self) -> Mapping[str, Any]:\n",
    "        \"\"\"Get the identifying parameters.\"\"\"\n",
    "        return {\"n\": 10}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "07d235a2",
   "metadata": {},
   "source": [
    "# CustomPromptTemplate\n",
    "\n",
    "而对于控制推理决策和汇总答案两个动作到底怎么拼装不同的 Prompt，必须要定义新的 CustomPromptTemplate 类，它继承了 StringPromptTemplate。其实只要重写一个 format 函数就行，langchain 会自动把每次 LLM 返回作为参数传递给 format 函数。我们只要判断哪次是首次向 LLM 询问决策，哪次是后续的拼装答案。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fab71d4d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "欢迎使用Llama-2-7b模型，输入内容即可进行对话，clear清空对话记录，stop终止程序：\n"
     ]
    }
   ],
   "source": [
    "from langchain.agents import Tool\n",
    "from langchain.tools import BaseTool\n",
    "from langchain import PromptTemplate, LLMChain\n",
    "from langchain.agents import BaseSingleActionAgent, AgentOutputParser, LLMSingleActionAgent, AgentExecutor\n",
    "from typing import List, Tuple, Any, Union, Optional, Type\n",
    "from langchain.schema import AgentAction, AgentFinish\n",
    "from langchain.prompts import StringPromptTemplate\n",
    "from langchain.callbacks.manager import CallbackManagerForToolRun\n",
    "import re\n",
    "\n",
    "# Prompt skeleton shared by both agent phases; CustomPromptTemplate.format()\n",
    "# fills the placeholders. NOTE(review): \"background_infomation\" is misspelled\n",
    "# (\"information\") but load-bearing — it must match the kwargs set in format().\n",
    "agent_template = \"\"\"\n",
    "你现在是一个{role}。这里是一些已知信息：\n",
    "{related_content}\n",
    "{background_infomation}\n",
    "{question_guide}：{input}\n",
    "\n",
    "{answer_format}\n",
    "\"\"\"\n",
    "\n",
    "class CustomPromptTemplate(StringPromptTemplate):\n",
    "    template: str\n",
    "    tools: List[Tool]\n",
    "    def format(self, **kwargs) -> str:\n",
    "        intermediate_steps = kwargs.pop(\"intermediate_steps\")\n",
    "        if len(intermediate_steps) == 0:\n",
    "            background_infomation = \"\\n\"\n",
    "            role = \"傻瓜机器人\"\n",
    "            question_guide = \"我现在有一个问题\"\n",
    "            answer_format = \"\"\"请你只回答\\\"DeepSearch(\"搜索词\")\\\"，并将\"搜索词\"替换为你认为需要搜索的关键词，除此之外不要回答其他任何内容。\\n\\n下面请回答我上面提出的问题！\"\"\"\n",
    "        else:\n",
    "            background_infomation = \"\\n\\n你还有这些已知信息作为参考：\\n\\n\"\n",
    "            action, observation = intermediate_steps[0]\n",
    "            background_infomation += f\"{observation}\\n\"\n",
    "            role = \"聪明的 AI 助手\"\n",
    "            question_guide = \"请根据这些已知信息回答我的问题\"\n",
    "            answer_format = \"\"\n",
    "        kwargs[\"background_infomation\"] = background_infomation\n",
    "        kwargs[\"role\"] = role\n",
    "        kwargs[\"question_guide\"] = question_guide\n",
    "        kwargs[\"answer_format\"] = answer_format\n",
    "        return self.template.format(**kwargs)\n",
    "\n",
    "class CustomSearchTool(BaseTool):\n",
    "    \"\"\"langchain tool wrapper around DeepSearch; synchronous use only.\"\"\"\n",
    "\n",
    "    name: str = \"DeepSearch\"\n",
    "    description: str = \"\"\n",
    "\n",
    "    def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None):\n",
    "        # Delegate directly to the search helper.\n",
    "        return DeepSearch.search(query=query)\n",
    "\n",
    "    async def _arun(self, query: str):\n",
    "        # Async execution is intentionally unsupported.\n",
    "        raise NotImplementedError(\"DeepSearch does not support async\")\n",
    "\n",
    "class CustomAgent(BaseSingleActionAgent):\n",
    "    \"\"\"Minimal agent that always forwards the raw input to the DeepSearch tool.\n",
    "\n",
    "    NOTE(review): appears unused in this notebook — the executor below is\n",
    "    built from LLMSingleActionAgent instead.\n",
    "    \"\"\"\n",
    "\n",
    "    @property\n",
    "    def input_keys(self):\n",
    "        # The only input key this agent expects from the executor.\n",
    "        return [\"input\"]\n",
    "\n",
    "    # NOTE(review): \"intermedate_steps\" is misspelled (\"intermediate\"); kept\n",
    "    # as-is because renaming a parameter could break keyword callers.\n",
    "    def plan(self, intermedate_steps: List[Tuple[AgentAction, str]],\n",
    "            **kwargs: Any) -> Union[AgentAction, AgentFinish]:\n",
    "        return AgentAction(tool=\"DeepSearch\", tool_input=kwargs[\"input\"], log=\"\")\n",
    "\n",
    "class CustomOutputParser(AgentOutputParser):\n",
    "    \"\"\"Turns LLM text into either a DeepSearch action or a final answer.\"\"\"\n",
    "\n",
    "    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:\n",
    "        match = re.match(r'^[\\s\\w]*(DeepSearch)\\(([^\\)]+)\\)', llm_output, re.DOTALL)\n",
    "        if match:\n",
    "            tool_name = match.group(1).strip()\n",
    "            raw_argument = match.group(2).strip()\n",
    "            # Strip surrounding spaces and quotes from the search-term argument.\n",
    "            return AgentAction(tool=tool_name, tool_input=raw_argument.strip(\" \").strip('\"'), log=llm_output)\n",
    "        # No DeepSearch(...) call found: the whole output is the final answer.\n",
    "        return AgentFinish(\n",
    "            return_values={\"output\": llm_output.strip()},\n",
    "            log=llm_output,\n",
    "        )\n",
    "\n",
    "\n",
    "class DeepAgent:\n",
    "    \"\"\"Facade that wires CustomLLM, the DeepSearch tool, prompt and executor.\"\"\"\n",
    "\n",
    "    tool_name: str = \"DeepSearch\"\n",
    "    agent_executor: Any  # fixed: annotation used the builtin `any`, not typing.Any\n",
    "    tools: List[Tool]\n",
    "    llm_chain: Any\n",
    "\n",
    "    def query(self, related_content: str = \"\", query: str = \"\"):\n",
    "        \"\"\"Run one agent round-trip for `query` with optional extra context.\"\"\"\n",
    "        result = self.agent_executor.run(related_content=related_content, input=query, tool_name=self.tool_name)\n",
    "        return result\n",
    "\n",
    "    def __init__(self, **kwargs):\n",
    "        llm = CustomLLM()\n",
    "        # Single tool: the Bing-backed DeepSearch.\n",
    "        tools = [\n",
    "                    Tool.from_function(\n",
    "                        func=DeepSearch.search,\n",
    "                        name=\"DeepSearch\",\n",
    "                        description=\"\"\n",
    "                    )\n",
    "                ]\n",
    "        self.tools = tools\n",
    "        tool_names = [tool.name for tool in tools]\n",
    "        output_parser = CustomOutputParser()\n",
    "        prompt = CustomPromptTemplate(template=agent_template,\n",
    "                                      tools=tools,\n",
    "                                      input_variables=[\"related_content\",\"tool_name\", \"input\", \"intermediate_steps\"])\n",
    "\n",
    "        llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
    "        self.llm_chain = llm_chain\n",
    "\n",
    "        # Stop on \"\\nObservation:\" so the LLM does not hallucinate tool output.\n",
    "        agent = LLMSingleActionAgent(\n",
    "            llm_chain=llm_chain,\n",
    "            output_parser=output_parser,\n",
    "            stop=[\"\\nObservation:\"],\n",
    "            allowed_tools=tool_names\n",
    "        )\n",
    "\n",
    "        # verbose=True prints the thought/action trace for debugging.\n",
    "        self.agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "\n",
    "    llm = CustomLLM()\n",
    "    tools = [\n",
    "                Tool.from_function(\n",
    "                    func=DeepSearch.search,\n",
    "                    name=\"DeepSearch\",\n",
    "                    description=\"\"\n",
    "                )\n",
    "            ]\n",
    "    tool_names = [tool.name for tool in tools]\n",
    "    output_parser = CustomOutputParser()\n",
    "    prompt = CustomPromptTemplate(template=agent_template,\n",
    "                                  tools=tools,\n",
    "                                  input_variables=[\"related_content\",\"tool_name\", \"input\", \"intermediate_steps\"])\n",
    "\n",
    "    llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
    "\n",
    "    agent = LLMSingleActionAgent(\n",
    "        llm_chain=llm_chain,\n",
    "        output_parser=output_parser,\n",
    "        stop=[\"\\nObservation:\"],\n",
    "        allowed_tools=tool_names\n",
    "    )\n",
    "\n",
    "    agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)\n",
    "    print(\"欢迎使用Llama-2-7b模型，输入内容即可进行对话，clear清空对话记录，stop终止程序：\")\n",
    "    while True:\n",
    "        query = input(\"\\n用户：\")\n",
    "        command = query.strip()\n",
    "        if command == \"stop\":\n",
    "            # Bug fix: the banner advertises `stop`, but it was never handled\n",
    "            # and the loop could not terminate.\n",
    "            break\n",
    "        if command == \"clear\":\n",
    "            # No conversation history is kept yet; just start a fresh turn.\n",
    "            continue\n",
    "        print(agent_executor.run(related_content=\"\", input=query, tool_name=\"DeepSearch\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0d4253be",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "28b67b5e",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
