{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import chatglm_cpp\n",
    "from tool_register import get_tools, run_function, get_tool_desc\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "pipeline = chatglm_cpp.Pipeline(\"../chatglm-ggml.bin\")\n",
    "\n",
    "# Sampling / generation settings shared by every pipeline.chat call below\n",
    "# (previously copy-pasted three times inside run_conv_glm).\n",
    "GEN_KWARGS = dict(\n",
    "    max_length=2048,\n",
    "    max_context_length=2048,\n",
    "    do_sample=True,  # boolean flag; the original passed 0.95, which was merely truthy\n",
    "    top_k=0,\n",
    "    top_p=0.8,\n",
    "    temperature=0.8,\n",
    "    repetition_penalty=1.0,\n",
    "    num_threads=0,\n",
    "    stream=False,\n",
    ")\n",
    "\n",
    "def run_conv_glm(messages, function_list=None, functions=None, model=\"chatglm3-6b\"):\n",
    "    \"\"\"Run one chat turn, automatically executing an external tool call if requested.\n",
    "\n",
    "    :param messages: list of {'role': ..., 'content': ...} dicts fed to the chat model\n",
    "    :param function_list: optional tool registry; when None the model answers directly\n",
    "    :param functions: unused here; kept for interface compatibility\n",
    "    :param model: unused here; kept for interface compatibility\n",
    "    :return: the model's final text response\n",
    "    \"\"\"\n",
    "    user_messages = [chatglm_cpp.ChatMessage(role=msg['role'], content=msg['content']) for msg in messages]\n",
    "    # Without a tool library this is a plain conversation task.\n",
    "    if function_list is None:\n",
    "        return pipeline.chat(user_messages, **GEN_KWARGS).content\n",
    "\n",
    "    # Tools available: prepend the tool-description system message so the\n",
    "    # model can decide whether to emit a tool call.\n",
    "    messages_with_system = [chatglm_cpp.ChatMessage(role='system', content=get_tool_desc())]\n",
    "    messages_with_system += user_messages\n",
    "    # First pass: the model either answers directly or requests a tool.\n",
    "    chatMessage = pipeline.chat(messages_with_system, **GEN_KWARGS)\n",
    "    if not chatMessage.tool_calls:\n",
    "        return chatMessage.content\n",
    "\n",
    "    (tool_call,) = chatMessage.tool_calls  # assumes exactly one tool call per turn\n",
    "    if tool_call.type != \"function\":\n",
    "        # Unknown tool-call type: return the model's own text instead of\n",
    "        # crashing on an unbound `observation` (bug in the original).\n",
    "        return chatMessage.content\n",
    "    observation = run_function(tool_call.function.name, tool_call.function.arguments)\n",
    "    # Keep the tool output short enough to fit back into the context window.\n",
    "    OBSERVATION_MAX_LENGTH = 1024\n",
    "    if isinstance(observation, str) and len(observation) > OBSERVATION_MAX_LENGTH:\n",
    "        observation = observation[:OBSERVATION_MAX_LENGTH] + \" [TRUNCATED]\"\n",
    "    # Feed the tool result back to the model as an observation message.\n",
    "    messages_with_system.append(chatglm_cpp.ChatMessage(role=\"observation\", content=observation))\n",
    "    # Second pass: the model turns the observation into a natural-language answer.\n",
    "    return pipeline.chat(messages_with_system, **GEN_KWARGS).content"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- 创建多轮对话函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def chat_with_glm(question, function_list=None, functions=None, model=\"chatglm3-6b\"):\n",
    "    \"\"\"Interactive multi-turn conversation loop built on run_conv_glm.\n",
    "\n",
    "    Repeatedly sends the accumulated history to run_conv_glm, prints the\n",
    "    answer, and prompts the user for a follow-up. The loop ends when the\n",
    "    user types \"退出\" (quit).\n",
    "\n",
    "    :param question: the user's first question\n",
    "    :param function_list: optional tool registry, forwarded to run_conv_glm\n",
    "    :param functions: forwarded to run_conv_glm (unused there)\n",
    "    :param model: forwarded to run_conv_glm (unused there)\n",
    "    \"\"\"\n",
    "    messages = [{\"role\": \"user\", \"content\": question}]\n",
    "    while True:\n",
    "        answer = run_conv_glm(messages=messages,\n",
    "                              function_list=function_list,\n",
    "                              functions=functions,\n",
    "                              model=model)\n",
    "        print(f\"模型回答：{answer}\")\n",
    "\n",
    "        # Ask whether the user has another question.\n",
    "        question = input(\"您还有其他问题吗？（输入退出以结束对话）：\")\n",
    "        if question == \"退出\":\n",
    "            break\n",
    "\n",
    "        # Append this turn to the history before the next round.\n",
    "        messages.append({\"role\": \"assistant\", \"content\": answer})\n",
    "        messages.append({\"role\": \"user\", \"content\": question})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{'role': 'user', 'content': '请问今天杭州天气怎么样？'}]\n",
      "模型回答：根据API返回的信息，今天杭州的天气情况为：温度为8摄氏度，相对湿度为81%，天气状况为“晴朗”。这些信息是从API获得的最新数据，因此可以相信它们的准确性。希望这些信息对您有所帮助！\n",
      "[{'role': 'user', 'content': '请问今天杭州天气怎么样？'}, {'role': 'assistant', 'content': '根据API返回的信息，今天杭州的天气情况为：温度为8摄氏度，相对湿度为81%，天气状况为“晴朗”。这些信息是从API获得的最新数据，因此可以相信它们的准确性。希望这些信息对您有所帮助！'}, {'role': 'user', 'content': '那北京呢'}]\n",
      "模型回答：根据API返回的信息，今天北京的天气情况为：温度为-1摄氏度，相对湿度为64%，天气状况为“晴朗”。这些信息是从API获得的最新数据，因此可以相信它们的准确性。希望这些信息对您有所帮助！\n",
      "[{'role': 'user', 'content': '请问今天杭州天气怎么样？'}, {'role': 'assistant', 'content': '根据API返回的信息，今天杭州的天气情况为：温度为8摄氏度，相对湿度为81%，天气状况为“晴朗”。这些信息是从API获得的最新数据，因此可以相信它们的准确性。希望这些信息对您有所帮助！'}, {'role': 'user', 'content': '那北京呢'}, {'role': 'assistant', 'content': '根据API返回的信息，今天北京的天气情况为：温度为-1摄氏度，相对湿度为64%，天气状况为“晴朗”。这些信息是从API获得的最新数据，因此可以相信它们的准确性。希望这些信息对您有所帮助！'}, {'role': 'user', 'content': '杭州比北京更冷吗'}]\n",
      "模型回答：根据API返回的信息，今天杭州的温度为8摄氏度，而北京的温度为-1摄氏度，因此杭州的温度比北京更高。\n"
     ]
    }
   ],
   "source": [
    "chat_with_glm(\"请问今天杭州天气怎么样？\", function_list=get_tools())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "chatglm3-demo",
   "language": "python",
   "name": "chatglm3-demo"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
