{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# OpenAI-compatible client pointed at a locally served model\n",
    "# (an OpenAI-style HTTP API on 127.0.0.1:8000).\n",
    "from openai import OpenAI\n",
    "client = OpenAI(\n",
    "  base_url=\"http://127.0.0.1:8000/v1\",\n",
    "  api_key = \"none\"  # placeholder; the local server presumably does not validate it -- confirm\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[registered tool] {'description': 'Generates a random number x, s.t. range[0] <= x < range[1]',\n",
      " 'name': 'random_number_generator',\n",
      " 'params': [{'description': 'The random seed used by the generator',\n",
      "             'name': 'seed',\n",
      "             'required': True,\n",
      "             'type': 'int'},\n",
      "            {'description': 'The range of the generated numbers',\n",
      "             'name': 'range',\n",
      "             'required': True,\n",
      "             'type': 'tuple[int, int]'}]}\n",
      "[registered tool] {'description': 'Get the current weather for `city_name`',\n",
      " 'name': 'get_weather',\n",
      " 'params': [{'description': 'The name of the city to be queried',\n",
      "             'name': 'city_name',\n",
      "             'required': True,\n",
      "             'type': 'str'}]}\n"
     ]
    }
   ],
   "source": [
    "from tool_register import get_tools, dispatch_tool\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tool registry (name/description/params dicts, as printed by the import\n",
    "# cell above) that will be injected into the model's system prompt.\n",
    "functions = get_tools()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "from loguru import logger\n",
    "from colorama import init, Fore\n",
    "import json\n",
    "import chatglm_cpp\n",
    "\n",
    "def run_conversation(query: str, stream=False, functions=None, max_retry=5):\n",
    "    messages = []\n",
    "    if functions:\n",
    "        TOOL_SYSTEM_PROMPT = (\n",
    "        \"Answer the following questions as best as you can. You have access to the following tools:\\n\"\n",
    "        + json.dumps(functions, indent=4)\n",
    "        )\n",
    "        messages_with_system = {\"role\": \"system\", \"content\": TOOL_SYSTEM_PROMPT}\n",
    "        messages.append(messages_with_system)\n",
    "\n",
    "    messages.append({\"role\": \"user\", \"content\": query})\n",
    "    \n",
    "    \n",
    "\n",
    "    for _ in range(max_retry):\n",
    "        messages_with_system = []\n",
    "        if functions:\n",
    "            TOOL_SYSTEM_PROMPT = (\n",
    "            \"Answer the following questions as best as you can. You have access to the following tools:\\n\"\n",
    "            + json.dumps(functions, indent=4)\n",
    "            )\n",
    "            messages_with_system.append({\"role\": \"system\", \"content\": TOOL_SYSTEM_PROMPT})\n",
    "            messages_with_system += messages\n",
    "        params = dict(model=\"chatglm3\", messages=messages, stream=stream)\n",
    "        response = client.chat.completions.create(**params)\n",
    "        if not stream:\n",
    "            reply_message = response.choices[0].message\n",
    "            messages.append(reply_message)\n",
    "            if not reply_message.tool_calls:\n",
    "                continue\n",
    "            (tool_call,) = reply_message.tool_calls\n",
    "            if tool_call.type == \"function\":\n",
    "                tool_response = dispatch_tool(tool_call.function.name, tool_call.function.arguments)\n",
    "                logger.info(f\"Tool Call Response: {tool_response}\")\n",
    "            if response.choices[0].message.function_call:\n",
    "                function_call = response.choices[0].message.function_call\n",
    "                logger.info(f\"Function Call Response: {function_call.model_dump()}\")\n",
    "\n",
    "                function_args = json.loads(function_call.arguments)\n",
    "                tool_response = dispatch_tool(function_call.name, function_args)\n",
    "                logger.info(f\"Tool Call Response: {tool_response}\")\n",
    "\n",
    "                params[\"messages\"].append(response.choices[0].message)\n",
    "                params[\"messages\"].append(\n",
    "                    {\n",
    "                        \"role\": \"function\",\n",
    "                        \"name\": function_call.name,\n",
    "                        \"content\": tool_response,  # 调用函数返回结果\n",
    "                    }\n",
    "                )\n",
    "            else:\n",
    "                reply = response.choices[0].message.content\n",
    "                logger.info(f\"Final Reply: \\n{reply}\")\n",
    "                return\n",
    "\n",
    "        else:\n",
    "            output = \"\"\n",
    "            chunks = []\n",
    "            for chunk in response:\n",
    "                content = chunk.choices[0].delta.content or \"\"\n",
    "                print(Fore.BLUE + content, end=\"\", flush=True)\n",
    "                output += content\n",
    "                chunks.append(chunk)\n",
    "                if chunk.choices[0].finish_reason == \"stop\":\n",
    "                    return\n",
    "\n",
    "                elif chunk.choices[0].finish_reason == \"function_call\":\n",
    "                    print(\"\\n\")\n",
    "\n",
    "                    function_call = chunk.choices[0].delta.function_call\n",
    "                    logger.info(f\"Function Call Response: {function_call.model_dump()}\")\n",
    "\n",
    "                    function_args = json.loads(function_call.arguments)\n",
    "                    tool_response = dispatch_tool(function_call.name, function_args)\n",
    "                    logger.info(f\"Tool Call Response: {tool_response}\")\n",
    "\n",
    "                    params[\"messages\"].append(\n",
    "                        {\n",
    "                            \"role\": \"assistant\",\n",
    "                            \"content\": output\n",
    "                        }\n",
    "                    )\n",
    "                    params[\"messages\"].append(\n",
    "                        {\n",
    "                            \"role\": \"function\",\n",
    "                            \"name\": function_call.name,\n",
    "                            \"content\": tool_response,\n",
    "                        }\n",
    "                    )\n",
    "\n",
    "                    break\n",
    "\n",
    "        response = client.chat.completions.create(**params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plain chat sanity check: a single user turn, no tool system prompt.\n",
    "params = dict(model=\"chatglm3\", messages=[{\"role\": \"user\", \"content\": \"你好\"}])\n",
    "response = client.chat.completions.create(**params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ChatCompletion(id='chatcmpl', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='你好👋！我是人工智能助手 ChatGLM3-6B，很高兴见到你，欢迎问我任何问题。', role='assistant', function_call=None, tool_calls=None))], created=1705324648, model='default-model', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=29, prompt_tokens=8, total_tokens=37))"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m2024-01-15 22:04:07.644\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36m__main__\u001b[0m:\u001b[36mrun_conversation\u001b[0m:\u001b[36m43\u001b[0m - \u001b[1mFinal Reply: \n",
      "```python\n",
      "tool_call(city_name='北京')\n",
      "```\u001b[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ChatCompletion(id='chatcmpl', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"```python\\ntool_call(city_name='北京')\\n```\", role='assistant', function_call=None, tool_calls=None))], created=1705327447, model='default-model', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=18, prompt_tokens=285, total_tokens=303))\n"
     ]
    }
   ],
   "source": [
    "# End-to-end run: ask for Beijing's weather with the registered tools\n",
    "# available, in non-streaming mode.\n",
    "query = \"帮我查询北京的天气怎么样\"\n",
    "run_conversation(query, functions=functions, stream=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "# Manual, step-by-step replay of what run_conversation does: inject the\n",
    "# tool registry via the system prompt, then send the first user turn.\n",
    "TOOL_SYSTEM_PROMPT = (\n",
    "            \"Answer the following questions as best as you can. You have access to the following tools:\\n\"\n",
    "            + json.dumps(functions, indent=4)\n",
    "            )\n",
    "messages_with_system = {\"role\": \"system\", \"content\": TOOL_SYSTEM_PROMPT}\n",
    "messages_with_user = {\"role\": \"user\", \"content\": \"帮我查询北京的天气怎么样\"}\n",
    "messages = [messages_with_system, messages_with_user]\n",
    "params = dict(model=\"chatglm3\", messages=messages)\n",
    "response = client.chat.completions.create(**params)\n",
    "reply_message = response.choices[0].message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ChatCompletionMessage(content=\"```python\\ntool_call(city_name='北京')\\n```\", role='assistant', function_call=None, tool_calls=None)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "reply_message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Feed the assistant's tool-call text back into the history and ask again.\n",
    "# NOTE(review): no tool is dispatched between these turns, yet the next\n",
    "# reply contains concrete weather values -- presumably generated by the\n",
    "# model itself rather than a real lookup; verify against the server.\n",
    "messages.append({\"role\": reply_message.role, \"content\": reply_message.content})\n",
    "params = dict(model=\"chatglm3\", messages=messages)\n",
    "response = client.chat.completions.create(**params)\n",
    "reply_message = response.choices[0].message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ChatCompletionMessage(content='北京现在的天气情况如下：温度为20摄氏度，相对湿度为40%，风力为2级，天气状况为晴朗。', role='assistant', function_call=None, tool_calls=None)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "reply_message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Record the previous final answer, then start a second query (Shanghai\n",
    "# weather) within the same conversation history.\n",
    "messages.append({\"role\": reply_message.role, \"content\": reply_message.content})\n",
    "messages.append({\"role\": \"user\", \"content\": \"查询下上海天气\"})\n",
    "params = dict(model=\"chatglm3\", messages=messages)\n",
    "response = client.chat.completions.create(**params)\n",
    "reply_message = response.choices[0].message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ChatCompletionMessage(content=\"```python\\ntool_call(city_name='上海')\\n```\", role='assistant', function_call=None, tool_calls=None)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "reply_message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Second round trip for the Shanghai query: append the model's tool-call\n",
    "# text and re-send. NOTE(review): as before, no dispatch_tool call happens\n",
    "# here, yet the reply below contains weather numbers -- confirm their origin.\n",
    "messages.append({\"role\": reply_message.role, \"content\": reply_message.content})\n",
    "params = dict(model=\"chatglm3\", messages=messages)\n",
    "response = client.chat.completions.create(**params)\n",
    "reply_message = response.choices[0].message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ChatCompletionMessage(content='上海现在的天气情况如下：温度为22摄氏度，相对湿度为45%，风力为3级，天气状况为多云。', role='assistant', function_call=None, tool_calls=None)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "reply_message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "chatglm3-demo",
   "language": "python",
   "name": "chatglm3-demo"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
