{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "abce8543-176a-4a58-953d-5148b2c3a1ac",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-03-17T14:47:05.357049Z",
     "iopub.status.busy": "2025-03-17T14:47:05.356696Z",
     "iopub.status.idle": "2025-03-17T14:47:05.661234Z",
     "shell.execute_reply": "2025-03-17T14:47:05.660816Z",
     "shell.execute_reply.started": "2025-03-17T14:47:05.357017Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from jinja2 import Template\n",
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "98306e65-61d0-471e-bb3d-31ccbeed79ae",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-03-17T14:47:05.663165Z",
     "iopub.status.busy": "2025-03-17T14:47:05.662623Z",
     "iopub.status.idle": "2025-03-17T14:47:05.868970Z",
     "shell.execute_reply": "2025-03-17T14:47:05.868496Z",
     "shell.execute_reply.started": "2025-03-17T14:47:05.663135Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Load the seed API dataset: one JSON object per line, with an 'apis' column\n",
    "# (a list of tool dicts) that the next cell flattens.\n",
    "data = pd.read_json('./api.jsonl',lines=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "47d62433-7554-4729-81d1-500ecd94311c",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-03-17T14:47:05.869746Z",
     "iopub.status.busy": "2025-03-17T14:47:05.869548Z",
     "iopub.status.idle": "2025-03-17T14:47:06.067114Z",
     "shell.execute_reply": "2025-03-17T14:47:06.066681Z",
     "shell.execute_reply.started": "2025-03-17T14:47:05.869730Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Flatten the per-row API lists from data['apis'] into one flat list of tool dicts.\n",
    "api_list = [api for api_l in data['apis'] for api in api_l]\n",
    "\n",
    "# Deduplicate by tool name, keeping the first occurrence of each name.\n",
    "# A set gives O(1) membership tests (the original used a list: O(n) per lookup,\n",
    "# O(n^2) over the whole corpus).\n",
    "seen = set()\n",
    "unique_list = []\n",
    "for tool in api_list:\n",
    "    if tool['name'] not in seen:\n",
    "        unique_list.append(tool)\n",
    "        seen.add(tool['name'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "cb6ca9e7-6983-4b07-a9cd-1135b29bd85b",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-03-17T14:47:06.067845Z",
     "iopub.status.busy": "2025-03-17T14:47:06.067639Z",
     "iopub.status.idle": "2025-03-17T14:47:06.263867Z",
     "shell.execute_reply": "2025-03-17T14:47:06.263461Z",
     "shell.execute_reply.started": "2025-03-17T14:47:06.067830Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# NOTE(review): this cell is an exact duplicate of the dedup logic in the\n",
    "# previous cell; it recomputes the same `seen` / `unique_list` from the same\n",
    "# `api_list` and can be removed without changing any downstream result.\n",
    "seen=[]\n",
    "unique_list = []\n",
    "for tool in api_list:\n",
    "    if tool['name'] not in seen:\n",
    "        unique_list.append(tool)\n",
    "        seen.append(tool['name'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "280fe571-a6ec-47e8-9279-e95759fcd4ac",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-03-17T14:47:06.264488Z",
     "iopub.status.busy": "2025-03-17T14:47:06.264328Z",
     "iopub.status.idle": "2025-03-17T14:47:06.267937Z",
     "shell.execute_reply": "2025-03-17T14:47:06.267402Z",
     "shell.execute_reply.started": "2025-03-17T14:47:06.264474Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Draw 2000 unique APIs to use as generation references.\n",
    "# The variable name keeps the original typo (`samlpe_api`) because the\n",
    "# generation cell below references it by this exact name.\n",
    "random.seed(42)  # fix the RNG so the sampled subset is reproducible on re-run\n",
    "# min() guards against ValueError when fewer than 2000 unique APIs exist\n",
    "samlpe_api = random.sample(unique_list, min(2000, len(unique_list)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "ec8cd851-f6ca-4d92-a8d2-b9e8ef3b736c",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-03-17T14:47:06.268831Z",
     "iopub.status.busy": "2025-03-17T14:47:06.268598Z",
     "iopub.status.idle": "2025-03-17T14:47:06.272385Z",
     "shell.execute_reply": "2025-03-17T14:47:06.271927Z",
     "shell.execute_reply.started": "2025-03-17T14:47:06.268816Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Jinja2 prompt template used to synthesize differentiated APIs from a reference API.\n",
    "# Placeholders: {{ reference_api }} (a tool dict) and {{ num_apis }} (count to generate).\n",
    "# The template body is a runtime string sent to the model (written in Chinese by\n",
    "# design); it must not be altered or translated.\n",
    "prompt_dialogue = \\\n",
    "\"\"\"根据参考API生成严格符合规范的差异化API，仅返回纯JSON数组：\n",
    "\n",
    "<输入约束>\n",
    "1. 参考API: {{ reference_api }}\n",
    "2. 生成数量: {{ num_apis }}个\n",
    "3. 业务领域: 必须与参考API相同类别\n",
    "4. 参数差异: 类型组合/默认值策略/参数数量必须不同\n",
    "</输入约束>\n",
    "\n",
    "<输出规范>\n",
    "[\n",
    "  {\n",
    "    \"name\": \"api名称\",\n",
    "    \"description\": \"中文功能描述\",\n",
    "    \"parameters\": {\n",
    "      \"type\": \"object\",\n",
    "      \"properties\": {\n",
    "        \"参数1\": {\n",
    "          \"type\": \"integer/boolean/string/number\",\n",
    "          \"description\": \"中文参数说明\",\n",
    "          \"default\": \"可选默认值（类型匹配）\" \n",
    "        }\n",
    "      },\n",
    "      \"required\": [\"必需参数名\"]\n",
    "    }\n",
    "  }\n",
    "]\n",
    "</输出规范>\n",
    "\n",
    "<禁止项>\n",
    "1. 与参考API的相似性：\n",
    "   - 相同前缀动词（generate/validate等）\n",
    "   - 相同业务对象（password/user等）\n",
    "   - 相同参数名（length/symbols等）\n",
    "   \n",
    "2. 参数限制：\n",
    "   - 禁止完全重复参数类型组合\n",
    "   - 禁止相同默认值逻辑（如bool参数默认true）\n",
    "   \n",
    "3. 格式错误：\n",
    "   - 禁止额外文本/注释\n",
    "   - 禁止JSON格式错误\n",
    "   - 禁止中文标点/全角字符\n",
    "</禁止项>\n",
    "\n",
    "<质量示例>\n",
    "# 参考API\n",
    "{\"name\":\"generate_password\",\"description\":\"生成随机密码\",\"parameters\":{\"type\":\"object\",\"properties\":{\"length\":{\"type\":\"integer\"},\"include_symbols\":{\"type\":\"boolean\",\"default\":true}},\"required\":[\"length\"]}\n",
    "\n",
    "# 合格输出\n",
    "[\n",
    "  {\n",
    "    \"name\": \"calculate_shipping_cost\",\n",
    "    \"description\": \"计算物流费用\",\n",
    "    \"parameters\": {\n",
    "      \"type\": \"object\",\n",
    "      \"properties\": {\n",
    "        \"weight\": {\"type\": \"number\", \"description\": \"包裹重量(kg)\"},\n",
    "        \"express\": {\"type\": \"boolean\", \"description\": \"是否加急\", \"default\": false},\n",
    "        \"region\": {\"type\": \"string\", \"description\": \"目的地区域代码\"}\n",
    "      },\n",
    "      \"required\": [\"weight\", \"region\"]\n",
    "    }\n",
    "  }\n",
    "]\n",
    "\n",
    "# 不合格输出\n",
    "/* 错误：包含注释 */\n",
    "[\n",
    "  {\n",
    "    \"name\": \"generate_token\", // 错误：使用相同动词\n",
    "    \"description\": \"生成访问令牌\",\n",
    "    \"parameters\": {\n",
    "      \"type\": \"object\",\n",
    "      \"properties\": {\n",
    "        \"length\": {\"type\": \"integer\"}, // 错误：重复参数名\n",
    "        \"secure\": {\"type\": \"boolean\", \"default\": true} // 错误：相同默认逻辑\n",
    "      },\n",
    "      \"required\": [\"length\"]\n",
    "    }\n",
    "  }\n",
    "]\n",
    "</质量示例>\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d2a4ae6-7a64-4f1b-8b13-6b7e0507a97c",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2025-03-17T14:47:06.273915Z",
     "iopub.status.busy": "2025-03-17T14:47:06.273685Z",
     "iopub.status.idle": "2025-03-17T14:48:40.417687Z",
     "shell.execute_reply": "2025-03-17T14:48:40.416771Z",
     "shell.execute_reply.started": "2025-03-17T14:47:06.273901Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "import openai\n",
    "from jinja2 import Template\n",
    "import json\n",
    "from typing import List, Dict, Generator, Union\n",
    "from tqdm import tqdm\n",
    "# (dropped imports the original cell never used: httpx, functools.partial, Callable)\n",
    "\n",
    "\n",
    "def configure_openai_client(url: str, api_key: str) -> None:\n",
    "    \"\"\"Point the module-level openai client at the given base URL and API key.\"\"\"\n",
    "    openai.api_key = api_key\n",
    "    openai.base_url = url\n",
    "    openai.proxy = \"\"  # explicitly disable any proxy configuration\n",
    "\n",
    "\n",
    "class LLMClient:\n",
    "    \"\"\"Thin wrapper around openai.chat.completions.create with optional streaming.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        url: str,\n",
    "        api_key: str = \"EMPTY\",\n",
    "        chat_mode: bool = True,\n",
    "        buffer_size: int = 20,\n",
    "        timeout: int = 60\n",
    "    ):\n",
    "        configure_openai_client(url, api_key)\n",
    "        self.chat_mode = chat_mode\n",
    "        self.buffer_size = buffer_size  # min characters per streamed yield\n",
    "        self.timeout = timeout\n",
    "        self.llm_function = openai.chat.completions.create\n",
    "\n",
    "    def _prepare_params(\n",
    "        self,\n",
    "        messages: List[Union[Dict, str]],\n",
    "        stream: bool,\n",
    "        **kwargs\n",
    "    ) -> Dict:\n",
    "        \"\"\"Build the request payload; chat mode sends `messages`, otherwise `prompt`.\"\"\"\n",
    "        params = {\n",
    "            **kwargs,\n",
    "            \"stream\": stream,\n",
    "            \"timeout\": self.timeout\n",
    "        }\n",
    "        if self.chat_mode:\n",
    "            params[\"messages\"] = messages\n",
    "        else:\n",
    "            params[\"prompt\"] = messages\n",
    "        return params\n",
    "\n",
    "    def generate(\n",
    "        self,\n",
    "        messages: List[Union[Dict, str]],\n",
    "        stream: bool = False,\n",
    "        **kwargs\n",
    "    ) -> Union[str, Generator[str, None, None]]:\n",
    "        \"\"\"Return the completion text, or a generator of text chunks when stream=True.\"\"\"\n",
    "        params = self._prepare_params(messages, stream, **kwargs)\n",
    "        if not stream:\n",
    "            response = self.llm_function(**params)\n",
    "            return response.choices[0].message.content\n",
    "        return self._handle_streaming_output(params)\n",
    "\n",
    "    def _handle_streaming_output(self, params: Dict) -> Generator[str, None, None]:\n",
    "        \"\"\"Yield streamed content in chunks of at least `buffer_size` characters.\"\"\"\n",
    "        buffer = \"\"\n",
    "        for chunk in self.llm_function(**params):\n",
    "            if len(chunk.choices) > 0 and chunk.choices[0].delta.content:\n",
    "                buffer += chunk.choices[0].delta.content\n",
    "                if len(buffer) >= self.buffer_size:\n",
    "                    yield buffer\n",
    "                    buffer = \"\"\n",
    "        if buffer:  # flush whatever remains after the stream ends\n",
    "            yield buffer\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # TODO: set the endpoint URL / API key (prefer environment variables over\n",
    "    # hardcoding credentials in the notebook).\n",
    "    model_url = \"\"\n",
    "    # Create the client and compile the template ONCE, outside the loop\n",
    "    # (the original rebuilt both on every iteration).\n",
    "    llm_client = LLMClient(model_url, \"\")\n",
    "    template = Template(prompt_dialogue)\n",
    "\n",
    "    # Collect rows in a list and build the DataFrame once at the end:\n",
    "    # growing a DataFrame with pd.concat inside a loop is quadratic.\n",
    "    # (Per-item debug prints removed; the tqdm bar shows progress.)\n",
    "    records = []\n",
    "    for api_l in tqdm(samlpe_api):\n",
    "        prompt = template.render(reference_api=api_l, num_apis=1)\n",
    "        response = llm_client.generate(\n",
    "            messages=[{\"role\": \"user\", \"content\": prompt}],\n",
    "            model='glm-4-air',\n",
    "            stream=False,\n",
    "            temperature=0.8\n",
    "        )\n",
    "        # Keep only responses that parse as JSON, per the prompt's output contract\n",
    "        # (json.JSONDecodeError is a subclass of ValueError).\n",
    "        try:\n",
    "            json.loads(response)\n",
    "        except ValueError:\n",
    "            continue\n",
    "        records.append({'conversations': response})\n",
    "    df = pd.DataFrame(records)\n",
    "    df.to_json('./hecheng_api.json',orient='records',force_ascii=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "12508c41-8168-4060-9650-fb810e99858c",
   "metadata": {
    "execution": {
     "iopub.status.busy": "2025-03-17T14:48:40.418180Z",
     "iopub.status.idle": "2025-03-17T14:48:40.418401Z",
     "shell.execute_reply": "2025-03-17T14:48:40.418303Z",
     "shell.execute_reply.started": "2025-03-17T14:48:40.418294Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# NOTE(review): redundant re-save — the generation cell above already writes\n",
    "# this exact file as its last statement; kept only so the on-disk output can be\n",
    "# refreshed independently of a full regeneration run.\n",
    "df.to_json('./hecheng_api.json',orient='records',force_ascii=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "xiayou",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
