{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "initial_id",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:53:44.115336Z",
     "start_time": "2025-01-09T09:53:44.111349Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from rag import *\n",
    "import re"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "6d5df67272b8b40b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:57:20.049609Z",
     "start_time": "2025-01-09T09:57:20.041823Z"
    }
   },
   "outputs": [],
   "source": [
    "def search_chunks_by_paper_id(paper_ids):\n",
    "    \"\"\"For each paper id, fetch its chunks and extract the Abstract,\n",
    "    Introduction and Related Work sections.\n",
    "\n",
    "    Returns a list of dicts with keys: paper_id, paper_title, abstract,\n",
    "    introduction, related_works (empty string when a section heading is\n",
    "    not found in any chunk). Papers with no indexed chunks are skipped.\n",
    "    \"\"\"\n",
    "    def extract_section(text, pattern, header):\n",
    "        # maxsplit=1 fixes a bug in the original: splitting on every\n",
    "        # occurrence made parts[1] end at the SECOND occurrence of the\n",
    "        # heading, truncating the extracted section.\n",
    "        parts = re.split(pattern, text, maxsplit=1, flags=re.IGNORECASE)\n",
    "        return header + '\\n' + parts[1] if len(parts) > 1 else None\n",
    "\n",
    "    paper_id2chunks_list = []\n",
    "    for paper_id in paper_ids:\n",
    "        abstract, introduction, related_works = '', '', ''\n",
    "        result = query_by_paper_id(paper_id=paper_id, top_k=10)\n",
    "        if not result:\n",
    "            # No chunks for this id; skip instead of crashing on result[0].\n",
    "            continue\n",
    "        paper_title = result[0]['paper_title']\n",
    "        for item in result:\n",
    "            text = item['chunk_text']\n",
    "            section = extract_section(text, r'Abstract', 'Abstract')\n",
    "            if section is not None:\n",
    "                abstract = section\n",
    "            # 'i ntroduction' handles a common PDF-extraction artifact\n",
    "            # where a stray space is injected inside the heading.\n",
    "            for pattern in (r'introduction', r'i ntroduction'):\n",
    "                section = extract_section(text, pattern, 'Introduction')\n",
    "                if section is not None:\n",
    "                    introduction = section\n",
    "            section = extract_section(text, r'Related Work', 'Related Works')\n",
    "            if section is not None:\n",
    "                related_works = section\n",
    "        paper_id2chunks_list.append({\n",
    "            'paper_id': paper_id,\n",
    "            'paper_title': paper_title,\n",
    "            'abstract': abstract,\n",
    "            'introduction': introduction,\n",
    "            'related_works': related_works,\n",
    "        })\n",
    "    return paper_id2chunks_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "2e5d10bbfc9d9d49",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:57:21.773306Z",
     "start_time": "2025-01-09T09:57:21.769572Z"
    }
   },
   "outputs": [],
   "source": [
    "def do_rag(keyword):\n",
    "    \"\"\"Gather candidate papers for `keyword` via three retrieval paths\n",
    "    (semantic search, title substring, chunk substring), deduplicate\n",
    "    the paper ids, and return their extracted sections.\n",
    "    \"\"\"\n",
    "    paper_ids_set = set()\n",
    "    # Semantic-search hits nest the payload under 'entity'.\n",
    "    for hit in search_papers(query=keyword, top_k=100):\n",
    "        paper_ids_set.add(hit['entity']['paper_id'])\n",
    "    # Substring-match hits carry the payload at the top level.\n",
    "    for hit in query_by_title_contain(title=keyword, top_k=100):\n",
    "        paper_ids_set.add(hit['paper_id'])\n",
    "    for hit in query_by_chunk_contain(chunk=keyword, top_k=100):\n",
    "        paper_ids_set.add(hit['paper_id'])\n",
    "    return search_chunks_by_paper_id(list(paper_ids_set))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "a4f37b31408a04bf",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:00:29.555424Z",
     "start_time": "2025-01-09T09:00:29.550835Z"
    }
   },
   "outputs": [],
   "source": [
    "def write_outline(keyword, max_chars=100000):\n",
    "    \"\"\"Retrieve papers for `keyword` and print their sections packed\n",
    "    into batches of at most `max_chars` characters each.\n",
    "\n",
    "    max_chars: soft cap per batch; a single paper longer than the cap\n",
    "    still becomes its own batch.\n",
    "    \"\"\"\n",
    "    # BUG FIX: the query was hardcoded to 'Aspect Based Sentiment\n",
    "    # Analysis', silently ignoring the `keyword` argument.\n",
    "    rag_result = do_rag(keyword)\n",
    "\n",
    "    strings = []  # finished batches\n",
    "    current_string = \"\"  # batch currently being built\n",
    "\n",
    "    for paper_id2chunks in rag_result:\n",
    "        content = (\n",
    "            f\"paper_title: {paper_id2chunks['paper_title']}\\n\"\n",
    "            f\"{paper_id2chunks['abstract']}\\n\"\n",
    "            f\"{paper_id2chunks['introduction']}\\n\"\n",
    "            f\"{paper_id2chunks['related_works']}\\n\"\n",
    "        )\n",
    "\n",
    "        # Start a new batch when adding this paper would exceed the cap.\n",
    "        # The `current_string` check avoids appending an empty batch when\n",
    "        # the very first paper is itself longer than the cap.\n",
    "        if current_string and len(current_string) + len(content) > max_chars:\n",
    "            strings.append(current_string)\n",
    "            current_string = content\n",
    "        else:\n",
    "            current_string += content\n",
    "\n",
    "    # Flush the trailing batch, if any.\n",
    "    if current_string:\n",
    "        strings.append(current_string)\n",
    "\n",
    "    for i, string in enumerate(strings):\n",
    "        print(f\"String {i + 1} (length: {len(string)}):\\n{string}\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "a9d30c5b5611db79",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:37:12.711736Z",
     "start_time": "2025-01-09T09:36:32.942391Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "111\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: how many distinct papers does a broad query return?\n",
    "rag_result = do_rag('Sentiment')\n",
    "print(len(rag_result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "aca789ed78add6d4",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:32:23.434629Z",
     "start_time": "2025-01-09T09:32:23.430347Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'paper_id': '645dad15d68f896efad9dd6c',\n",
       " 'paper_title': 'PVT-SSD: Single-Stage 3D Object Detector with Point-Voxel Transformer',\n",
       " 'abstract': 'Abstract\\n\\nRecent Transformer-based 3D object detectors learn point cloud features either from point- or voxel-based representations. However, the former requires time-consuming sampling while the latter introduces quantization errors. In this paper, we present a novel Point-Voxel Transformer for single-stage 3D detection (PVT-SSD) that takes advantage of these two representations. Specifically, we first use voxel-based sparse convolutions for efficient feature encoding. Then, we propose a Point-Voxel Transformer (PVT) module that obtains long-range contexts in a cheap manner from voxels while attaining accurate positions from points. The key to associating the two different representations is our introduced input-dependent Query Initialization module, which could efficiently generate reference points and content queries. Then, PVT adaptively fuses long-range contextual and local geometric information around reference points into content queries. Further, to quickly find the neighboring points of reference points, we design the Virtual Range Image module, which generalizes the native range image to multi-sensor and multi-frame. The experiments on several autonomous driving benchmarks verify the effectiveness and efficiency of the proposed method. Code will be available at https://github.com/ Nightmare-n/PVT-SSD .\\n\\n# 1. Introduction\\n3D object detection from point clouds has become increasingly popular thanks to its wide applications, e.g., autonomous driving and virtual reality. To process unordered point clouds, Transformer [ 51 ] has recently attracted great interest as the self-attention is invariant to the permutation of inputs. However, due to the quadratic complexity of self-attention, it involves extensive computation and memory budgets when processing large point clouds. 
To overcome this problem, some point-based methods [ 29 ,36 ,37 ] perform attention on downsampled point sets, while some voxel-based methods [ 10 ,33 ,64 ] employ attention on local non-empty voxels. Nevertheless, the former requires farthest point sampling (FPS) [ 41 ] to sample point clouds, which is time-consuming on large-scale outdoor scenes [ 19 ], while the latter inevitably introduces quantization errors during voxelization, which loses accurate position information.  \\n\\nIn this paper, we propose PVT-SSD that absorbs the advantages of the above two representations, i.e., voxels and points, while overcoming their drawbacks. To this end, instead of sampling points directly, we convert points to a small number of voxels through sparse convolutions and sample non-empty voxels to reduce the runtime of FPS. Then, inside the PVT-SSD, voxel features are adaptively fused with point features to make up for the quantization error. In this way, both long-range contexts from voxels and accurate positions from points are preserved. Specifically, PVT-SSD consists of the following components:  \\n\\nFirstly, we propose an input-dependent Query Initialization module inspired by previous indoor Transformerbased detectors [ 29 ,36 ], which provides queries with better initial positions and instance-related features. Unlike [ 29 ,36 ], our queries originate from non-empty voxels instead of points to reduce the sampling time. Concretely, with the 3D voxels generated by sparse convolutions, we first collapse 3D voxels into 2D voxels by merging voxels along the height dimension to further reduce the number of voxels. The sample operation is then applied to select a representative set of voxels. We finally lift sampled 2D voxels to generate 3D reference points. Subsequently, the corresponding content queries are obtained in an efficient way by projecting reference points onto a BEV feature map and indexing features at the projected locations.  
\\n\\nSecondly, we introduce a Point-Voxel Transformer module that captures long-range contextual features from voxel tokens and extracts fine-grained point features from point tokens. To be specific, the voxel tokens are obtained from non-empty voxels around reference points to cover a large attention range. In contrast, the point tokens are generated from neighboring points near reference points to retain fine-grained information. These two different tokens are adaptively fused by the cross-attention layer based on the similarity with content queries to complement each other.  \\n\\nFurthermore, we design a Virtual Range Image module to accelerate the neighbor querying process in the pointvoxel Transformer. With the constructed range image, reference points can quickly find their neighbors based on range image coordinates. Unlike the native range image captured by LiDAR sensors, we can handle situations where multiple points overlap on the same pixel in the range image. Therefore, it can be used for complex scenarios, such as multiple sensors and multi-frame fusion.  \\n\\nExtensive experiments have been conducted on several detection benchmarks to verify the efficacy and efficiency of our approach. PVT-SSD achieves competitive results on KITTI [ 13 ], Waymo Open Dataset [ 48 ], and nuScenes [ 3 ].\\n\\n# 2. Related Work\\n3D Object Detection from Point Clouds. Current 3D detectors can be mainly divided into voxel-, point-, and point-voxel-based methods. To process irregular 3D point clouds, voxel-based methods [ 7 ,16 ,53 ,56 ,61 ,74 ,75 ] project them onto regular voxels. VoxelNet [ 76 ] leverages PointNet [ 40 ] to generate a voxel-wise representation and applies standard 3D and 2D convolutions for object detection. PointPillars [ 21 ] simplifies the voxels to pillars. CenterPoint [ 70 ] estimates the centers of objects using a keypoint detector and removes the need for axis-aligned anchor boxes. 
Though voxel-based methods achieve good detection performance with promising efficiency, voxelization inevitably introduces quantization errors.  \\n\\nPoint-based methods [ 45 –47 ,66 ] overcome this by directly operating on raw point clouds. VoteNet [ 39 ] detects 3D objects through Hough voting and clustering. 3DSSD [ 65 ] proposes a hybrid sampling strategy by utilizing both feature and geometry distance for better classification performance. Some approaches [ 6 ,52 ,72 ] use objectness scores rather than feature distance to improve the foreground points ratio after downsampling. It is generally time-consuming to repeatedly apply sampling and grouping operations on large-scale point clouds.  \\n\\nPoint-voxel-based methods [ 35 ,63 ,69 ] take advantage of the efficiency of 3D sparse convolutions while preserving accurate point locations. PV-RCNN [ 43 ] and its variants [ 44 ] extract point-wise features from voxel abstraction networks to refine the proposals generated from the 3D voxel backbone. Pyramid R-CNN [ 32 ] collects points for each RoI in a pyramid manner.  \\n\\n3D Object Detection from Range Images. There are some prior works [ 2 ,24 ,50 ] that attempt to predict 3D boxes from raw representations captured by LiDAR sensors, i.e., 2D perspective range images. LaserNet [ 34 ] applies traditional 2D convolutions to range images to directly regress boxes. RangeDet [ 12 ] and PPC [ 5 ] introduce point-based convolution kernels to capture 3D geometric information from 2D range view representation. The representation of range images is compact and dense, and free of quantization errors, which inspires us to use it to speed up the ball query algorithm [ 41 ] widely used in point-based methods. The difference from earlier methods is that our constructed virtual range images can handle more complex situations, such as point clouds from multi-frame or multi-sensor.  \\n\\nPoint Cloud Analysis by Transformer. 
Transformer [ 51 ] has demonstrated its great success in many computer vision tasks [ 4 ,78 ]. Recent approaches [ 20 ,25 ,25 ,28 ,37 ,38 ,67 ,73 ,77 ] also explore it for point cloud analysis. Pointformer [ 37 ] proposes local and global attention modules to process 3D point clouds. Group-Free [ 29 ] eliminates hand-crafted grouping schemes by applying an attention module on all the points. 3DETR [ 36 ] develops an end-to-end Transformer-based detection model with minimal 3D specific inductive biases. VoTr [ 33 ] introduces a voxel-based Transformer that adopts both local and dilated attention to enlarge receptive fields of the model. SST [ 10 ]extends the shifted window [ 26 ] to 3D scenes and employs self-attention on non-empty voxels within the window. Object DGCNN [ 54 ] incorporates grid-based BEV features around queries through deformable attention [ 78 ]. VISTA [ 8 ] adaptively fuses global multi-view features via an attention module. Despite the effectiveness, they often fail to capture fine patterns of point clouds due to voxelization. CT3D [ 42 ] builds Transformer on top of a two-stage detector and operates attention on the points grouped by RoIs. EQ-PVRCNN [ 64 ] takes proposal grids as queries and generates RoI features from a voxel-based backbone.',\n",
       " 'introduction': 'Introduction\\n\\n3D object detection from point clouds has become increasingly popular thanks to its wide applications, e.g., autonomous driving and virtual reality. To process unordered point clouds, Transformer [ 51 ] has recently attracted great interest as the self-attention is invariant to the permutation of inputs. However, due to the quadratic complexity of self-attention, it involves extensive computation and memory budgets when processing large point clouds. To overcome this problem, some point-based methods [ 29 ,36 ,37 ] perform attention on downsampled point sets, while some voxel-based methods [ 10 ,33 ,64 ] employ attention on local non-empty voxels. Nevertheless, the former requires farthest point sampling (FPS) [ 41 ] to sample point clouds, which is time-consuming on large-scale outdoor scenes [ 19 ], while the latter inevitably introduces quantization errors during voxelization, which loses accurate position information.  \\n\\nIn this paper, we propose PVT-SSD that absorbs the advantages of the above two representations, i.e., voxels and points, while overcoming their drawbacks. To this end, instead of sampling points directly, we convert points to a small number of voxels through sparse convolutions and sample non-empty voxels to reduce the runtime of FPS. Then, inside the PVT-SSD, voxel features are adaptively fused with point features to make up for the quantization error. In this way, both long-range contexts from voxels and accurate positions from points are preserved. Specifically, PVT-SSD consists of the following components:  \\n\\nFirstly, we propose an input-dependent Query Initialization module inspired by previous indoor Transformerbased detectors [ 29 ,36 ], which provides queries with better initial positions and instance-related features. Unlike [ 29 ,36 ], our queries originate from non-empty voxels instead of points to reduce the sampling time. 
Concretely, with the 3D voxels generated by sparse convolutions, we first collapse 3D voxels into 2D voxels by merging voxels along the height dimension to further reduce the number of voxels. The sample operation is then applied to select a representative set of voxels. We finally lift sampled 2D voxels to generate 3D reference points. Subsequently, the corresponding content queries are obtained in an efficient way by projecting reference points onto a BEV feature map and indexing features at the projected locations.  \\n\\nSecondly, we introduce a Point-Voxel Transformer module that captures long-range contextual features from voxel tokens and extracts fine-grained point features from point tokens. To be specific, the voxel tokens are obtained from non-empty voxels around reference points to cover a large attention range. In contrast, the point tokens are generated from neighboring points near reference points to retain fine-grained information. These two different tokens are adaptively fused by the cross-attention layer based on the similarity with content queries to complement each other.  \\n\\nFurthermore, we design a Virtual Range Image module to accelerate the neighbor querying process in the pointvoxel Transformer. With the constructed range image, reference points can quickly find their neighbors based on range image coordinates. Unlike the native range image captured by LiDAR sensors, we can handle situations where multiple points overlap on the same pixel in the range image. Therefore, it can be used for complex scenarios, such as multiple sensors and multi-frame fusion.  \\n\\nExtensive experiments have been conducted on several detection benchmarks to verify the efficacy and efficiency of our approach. PVT-SSD achieves competitive results on KITTI [ 13 ], Waymo Open Dataset [ 48 ], and nuScenes [ 3 ].\\n\\n# 2. Related Work\\n3D Object Detection from Point Clouds. 
Current 3D detectors can be mainly divided into voxel-, point-, and point-voxel-based methods. To process irregular 3D point clouds, voxel-based methods [ 7 ,16 ,53 ,56 ,61 ,74 ,75 ] project them onto regular voxels. VoxelNet [ 76 ] leverages PointNet [ 40 ] to generate a voxel-wise representation and applies standard 3D and 2D convolutions for object detection. PointPillars [ 21 ] simplifies the voxels to pillars. CenterPoint [ 70 ] estimates the centers of objects using a keypoint detector and removes the need for axis-aligned anchor boxes. Though voxel-based methods achieve good detection performance with promising efficiency, voxelization inevitably introduces quantization errors.  \\n\\nPoint-based methods [ 45 –47 ,66 ] overcome this by directly operating on raw point clouds. VoteNet [ 39 ] detects 3D objects through Hough voting and clustering. 3DSSD [ 65 ] proposes a hybrid sampling strategy by utilizing both feature and geometry distance for better classification performance. Some approaches [ 6 ,52 ,72 ] use objectness scores rather than feature distance to improve the foreground points ratio after downsampling. It is generally time-consuming to repeatedly apply sampling and grouping operations on large-scale point clouds.  \\n\\nPoint-voxel-based methods [ 35 ,63 ,69 ] take advantage of the efficiency of 3D sparse convolutions while preserving accurate point locations. PV-RCNN [ 43 ] and its variants [ 44 ] extract point-wise features from voxel abstraction networks to refine the proposals generated from the 3D voxel backbone. Pyramid R-CNN [ 32 ] collects points for each RoI in a pyramid manner.  \\n\\n3D Object Detection from Range Images. There are some prior works [ 2 ,24 ,50 ] that attempt to predict 3D boxes from raw representations captured by LiDAR sensors, i.e., 2D perspective range images. LaserNet [ 34 ] applies traditional 2D convolutions to range images to directly regress boxes. 
RangeDet [ 12 ] and PPC [ 5 ] introduce point-based convolution kernels to capture 3D geometric information from 2D range view representation. The representation of range images is compact and dense, and free of quantization errors, which inspires us to use it to speed up the ball query algorithm [ 41 ] widely used in point-based methods. The difference from earlier methods is that our constructed virtual range images can handle more complex situations, such as point clouds from multi-frame or multi-sensor.  \\n\\nPoint Cloud Analysis by Transformer. Transformer [ 51 ] has demonstrated its great success in many computer vision tasks [ 4 ,78 ]. Recent approaches [ 20 ,25 ,25 ,28 ,37 ,38 ,67 ,73 ,77 ] also explore it for point cloud analysis. Pointformer [ 37 ] proposes local and global attention modules to process 3D point clouds. Group-Free [ 29 ] eliminates hand-crafted grouping schemes by applying an attention module on all the points. 3DETR [ 36 ] develops an end-to-end Transformer-based detection model with minimal 3D specific inductive biases. VoTr [ 33 ] introduces a voxel-based Transformer that adopts both local and dilated attention to enlarge receptive fields of the model. SST [ 10 ]extends the shifted window [ 26 ] to 3D scenes and employs self-attention on non-empty voxels within the window. Object DGCNN [ 54 ] incorporates grid-based BEV features around queries through deformable attention [ 78 ]. VISTA [ 8 ] adaptively fuses global multi-view features via an attention module. Despite the effectiveness, they often fail to capture fine patterns of point clouds due to voxelization. CT3D [ 42 ] builds Transformer on top of a two-stage detector and operates attention on the points grouped by RoIs. EQ-PVRCNN [ 64 ] takes proposal grids as queries and generates RoI features from a voxel-based backbone.',\n",
       " 'related_works': 'Related Works\\n\\n3D Object Detection from Point Clouds. Current 3D detectors can be mainly divided into voxel-, point-, and point-voxel-based methods. To process irregular 3D point clouds, voxel-based methods [ 7 ,16 ,53 ,56 ,61 ,74 ,75 ] project them onto regular voxels. VoxelNet [ 76 ] leverages PointNet [ 40 ] to generate a voxel-wise representation and applies standard 3D and 2D convolutions for object detection. PointPillars [ 21 ] simplifies the voxels to pillars. CenterPoint [ 70 ] estimates the centers of objects using a keypoint detector and removes the need for axis-aligned anchor boxes. Though voxel-based methods achieve good detection performance with promising efficiency, voxelization inevitably introduces quantization errors.  \\n\\nPoint-based methods [ 45 –47 ,66 ] overcome this by directly operating on raw point clouds. VoteNet [ 39 ] detects 3D objects through Hough voting and clustering. 3DSSD [ 65 ] proposes a hybrid sampling strategy by utilizing both feature and geometry distance for better classification performance. Some approaches [ 6 ,52 ,72 ] use objectness scores rather than feature distance to improve the foreground points ratio after downsampling. It is generally time-consuming to repeatedly apply sampling and grouping operations on large-scale point clouds.  \\n\\nPoint-voxel-based methods [ 35 ,63 ,69 ] take advantage of the efficiency of 3D sparse convolutions while preserving accurate point locations. PV-RCNN [ 43 ] and its variants [ 44 ] extract point-wise features from voxel abstraction networks to refine the proposals generated from the 3D voxel backbone. Pyramid R-CNN [ 32 ] collects points for each RoI in a pyramid manner.  \\n\\n3D Object Detection from Range Images. There are some prior works [ 2 ,24 ,50 ] that attempt to predict 3D boxes from raw representations captured by LiDAR sensors, i.e., 2D perspective range images. 
LaserNet [ 34 ] applies traditional 2D convolutions to range images to directly regress boxes. RangeDet [ 12 ] and PPC [ 5 ] introduce point-based convolution kernels to capture 3D geometric information from 2D range view representation. The representation of range images is compact and dense, and free of quantization errors, which inspires us to use it to speed up the ball query algorithm [ 41 ] widely used in point-based methods. The difference from earlier methods is that our constructed virtual range images can handle more complex situations, such as point clouds from multi-frame or multi-sensor.  \\n\\nPoint Cloud Analysis by Transformer. Transformer [ 51 ] has demonstrated its great success in many computer vision tasks [ 4 ,78 ]. Recent approaches [ 20 ,25 ,25 ,28 ,37 ,38 ,67 ,73 ,77 ] also explore it for point cloud analysis. Pointformer [ 37 ] proposes local and global attention modules to process 3D point clouds. Group-Free [ 29 ] eliminates hand-crafted grouping schemes by applying an attention module on all the points. 3DETR [ 36 ] develops an end-to-end Transformer-based detection model with minimal 3D specific inductive biases. VoTr [ 33 ] introduces a voxel-based Transformer that adopts both local and dilated attention to enlarge receptive fields of the model. SST [ 10 ]extends the shifted window [ 26 ] to 3D scenes and employs self-attention on non-empty voxels within the window. Object DGCNN [ 54 ] incorporates grid-based BEV features around queries through deformable attention [ 78 ]. VISTA [ 8 ] adaptively fuses global multi-view features via an attention module. Despite the effectiveness, they often fail to capture fine patterns of point clouds due to voxelization. CT3D [ 42 ] builds Transformer on top of a two-stage detector and operates attention on the points grouped by RoIs. EQ-PVRCNN [ 64 ] takes proposal grids as queries and generates RoI features from a voxel-based backbone.'}"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect one retrieved entry to eyeball the extracted sections.\n",
    "rag_result[3]\n",
    "# Count all characters in paper_id2chunks_list\n",
    "# NOTE(review): the comment above describes work not implemented in this cell."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "2ddaa2ac565535d5",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T06:30:43.103332Z",
     "start_time": "2025-01-10T06:30:40.225868Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Knowledge Graph Augmented Network Towards Multiview Representation Learning for Aspect-based Sentiment Analysis\n",
      "Aspect-oriented Opinion Alignment Network for Aspect-Based Sentiment Classification\n",
      "MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction.\n",
      "Tagging-Assisted Generation Model with Encoder and Decoder Supervision for Aspect Sentiment Triplet Extraction\n",
      "AMR-based Network for Aspect-based Sentiment Analysis.\n",
      "M2DF: Multi-grained Multi-curriculum Denoising Framework for Multimodal Aspect-based Sentiment Analysis\n",
      "Aspect-Based Sentiment Analysis with Explicit Sentiment Augmentations\n",
      "Target-to-Source Augmentation for Aspect Sentiment Triplet Extraction\n",
      "AX-MABSA: A Framework for Extremely Weakly Supervised Multi-label Aspect Based Sentiment Analysis\n",
      "A Span-level Bidirectional Network for Aspect Sentiment Triplet Extraction\n",
      "10\n"
     ]
    }
   ],
   "source": [
    "# Keep only papers whose title mentions 'Aspect', then re-fetch their\n",
    "# sections so rag_result holds just the filtered set.\n",
    "num = 0\n",
    "paper_id_list = []\n",
    "for paper in rag_result:\n",
    "    title = paper['paper_title']\n",
    "    if 'Aspect' in title:\n",
    "        paper_id_list.append(paper['paper_id'])\n",
    "        print(title)\n",
    "        num += 1\n",
    "rag_result = search_chunks_by_paper_id(paper_id_list)\n",
    "print(len(rag_result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "faceccd3c6b8179a",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:57:41.522066Z",
     "start_time": "2025-01-09T09:57:41.517802Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'paper_id': '61e0e9d05244ab9dcb28ca74',\n",
       " 'paper_title': 'Knowledge Graph Augmented Network Towards Multiview Representation Learning for Aspect-based Sentiment Analysis',\n",
       " 'abstract': 'Abstract\\n (difficult) concepts. For rare difficult words, we could comprehend their meanings via their relevant normal words. Inspired by this phenomenon, we employ WordNet as prior knowledge for sentence understanding. For example, the “Tinca” is the subordinate of the “fish genus”, which can be directly related to aspects such as “fish” or “food”, thus alleviating the difficulty of comprehending the sentence.  \\n\\nDifferent from Zhou et al. [17], which directly employs the graph-structure data of the knowledge base, we introduce a simple and efficient strategy to process the knowledge graphs. specifically, semantic matching approaches (see the analysis of different approaches in Sec. 4.3.3) for the task of knowledge graph embedding (KGE) [49] are used to model the semantic relations of knowledge graphs into distributed representations, i.e. , learned knowledge embeddings. In practice, given the graph data in the form of “entity-relation-entity” triples, we train the entity embeddings using an open KGE toolkit, OpenKE 3 [50]. Subsequently, we use the trained knowledge embeddings to initialize a new embedding matrix and then represent the words of $S$ and $T$ with the knowledge embedding matrix. The mapped knowledge embeddings are then concatenated with the hidden state vectors $H_{s}{}^{4}$ . To establish the connection of $S$ and $T$ in knowledge embedding space, we further employ a soft attention mechanism to calculate the semantic relatedness of each word in $S$ and $T$ and capture the most important semantic features as aspectspecific knowledge representations, denoted as $R_{k}$ . For better understanding, taking the sentence “Try the local food , especially the decent Tinca.” and the aspect word “food” as an example, the process of the knowledge branch is illustrated in Fig. 4. 
Notably, since the context word “Tinca” is the subordinate of the aspect “food” and they are also adjacent to each other in the knowledge embedding space, KGAN could easily capture their relatedness and make the correct prediction.  \\n\\nNotably, it also should be noted that the training of these branches is not independent. Specifically, given an input sentencethe same embedding matrix to convert the aspect pair $\\\\{S,\\\\,T\\\\}$ , the contextual and synta Sand c bra Tinto the hes apply corresponding word embeddings, while the knowledge branch uses another knowledge embedding matrix to map the input entities to knowledge embeddings. As a result, through the parallel processing of three branches, KGAN can capture the aspectspecific information from multiple perspectives simultaneously.\\n\\n# 3.4 Hierarchical Fusion Module\\nSince the above representations different views, directly fusing them may scarcely take advantage $\\\\{R_{c},R_{s},R_{k}\\\\}$ are obtained from of their complementarity. To this end, we adopt a hierarchical fusion module to synergistically fuse these representations in a local-to-global manner, which could effectively boost the performance. An illustration of this fusion module is shown in Fig. 5. For ease of illustration, we employ the “input” to represent the procedures of multiple branches.  \\n\\nIn the local fusion procedure, we first concatenate two of the three feature representations in rows, i.e. ,$[R_{c};\\\\;R_{s}],\\\\;[R_{c};\\\\;R_{k}]$ and $[R_{s};\\\\,R_{k}]$ , where “;” denotes vector concatenation operator. The fused representations are fed into three separate fully connected layers to obtain the predicted sentiment features, denoted as $R_{c s}$ ,$R_{c k}$ and $R_{s k}$ . It is noteworthy that we do not share the parameters of these fully connected layers. Subsequently, to make full use of the complementarity between multiple sentiment features, we further fuse them at the global level. 
Specifically, $\\\\left[R_{c s},R_{c k},R_{s k}\\\\right]^{T}$ the obtained sentiment features are concatenated in columns, \\', and we feed them into a $3{^\\\\ast}3$ convolution layer i.e. ,to selectively incorporate these features.  \\n\\nThrough the above local and global fusion procedures, we can make the feature representations benefit from each other step by step. In this way, external knowledge could be better integrated with contextual and syntactic information, thus achieving more promising performance.\\n\\n# 3. https://github.com/thunlp/OpenKE.\\n4. Such a process can not only fuse the heterogeneous features (text and graph), but also alleviate the negative effect of sparsity and inaccuracy of knowledge embeddings.  \\n\\n  \\nFig. 5. Illustration of the hierarchical fusion module.  \\n\\nTABLE 1 Statistics of evaluated aspect-level datasets.   \\n\\n\\n<html><body><table><tr><td>Datasets</td><td>Division</td><td>#Positive</td><td>#Negative</td><td>#Neutral</td></tr><tr><td rowspan=\"2\">Laptop14</td><td>Train</td><td>980</td><td>858</td><td>454</td></tr><tr><td>Test</td><td>340</td><td>128</td><td>171</td></tr><tr><td rowspan=\"2\">Restaurant14</td><td>Train</td><td>2159</td><td>800</td><td>632</td></tr><tr><td>Test</td><td>730</td><td>195</td><td>196</td></tr><tr><td rowspan=\"2\">Twitter</td><td>Train</td><td>1567</td><td>1563</td><td>3127</td></tr><tr><td>Test</td><td>174</td><td>174</td><td>346</td></tr><tr><td rowspan=\"2\">Restaurant15</td><td>Train</td><td>912</td><td>256</td><td>36</td></tr><tr><td>Test</td><td>326</td><td>182</td><td>34</td></tr><tr><td rowspan=\"2\">Restaurant16</td><td>Train</td><td>1240</td><td>439</td><td>69</td></tr><tr><td>Test</td><td>469</td><td>117</td><td>30</td></tr></table></body></html>  \\n\\nLast, we cast the output of the convolution layer as the final sentiment prediction, namely, $p$ , and employ the following crossentropy loss function to guide the optimization and training.  
\\n\\n$$\\n\\\\mathcal{L}=-\\\\sum_{i}\\\\sum_{j}y_{i}^{j}\\\\log(p_{i}^{j}),\\n$$  \\n\\nwhere $i$ indexes the instance of the ABSA dataset, and $j$ indexes the sentiment polarity.',\n",
       " 'introduction': 'Introduction\\n\\nA Sa fine-grained task of sentiment analysis, aspect-based sentiment analysis (ABSA) has grown to be an active research task in the community of natural language understanding (NLU) [1], [2], [3]. In particular, ABSA refers to judging the sentiment polarities ( e.g. , positive, neutral, and negative) towards the given aspects, which are usually the target entities appearing in the sentence [4]. Taking the sentence “The food was good, but the service was poor.” as an example, as shown in Fig. 1(a), the goal of ABSA is to predict the polarities “positive” and “negative” for the aspects food and service , respectively.  \\n\\nRecent ABSA modeling approaches are mainly based on deep neural networks (DNNs) owing to the capability of automatically extracting semantic features [5]. Specifically, based on the type of learned feature representations, existing DNNs for ABSA can be classified into two groups: context-based methods [6], [7], [8] and syntax-based methods [9], [10], [11]. Context-based methods first employ convolutional neural networks (CNNs) or long short-term memory networks (LSTMs) to extract the features of aspects and context words and then use the attention mechanism to capture the aspect-specific contextual representations. In addition to contextbased methods, syntax-based methods attempt to model the nonlocal dependency trees (a case in point is shown in Fig. 1(b)) of sentences with graph neural networks, e.g. , graph convolutional networks (GCNs) to encode the syntactic information and syntactically connect the aspects with related opinion words [12].  \\n\\n  \\nFig. 1. (a) An example sentence of the ABSA task from the restaurant reviews. There are two aspects with opposite sentiment polarities in this sentence. (b) Illustration of the dependency parsing result.  \\n\\nMore recently, given effective knowledge, e.g. 
, linguistic and commonsense, for representation approaches in NLU tasks [13], [14], [15], researchers employ external knowledge to augment the semantic features in ABSA models [16], [17], [18], [19]. However, they make extensive modifications to model structures or objectives to encode the different kinds of knowledge, limiting the applicability of their methods to a broader range of tasks and knowledge types. For example, Zhou et al. [17] directly utilized the words ( w.r.t. aspect terms in sentences ) in knowledge graphs as the seed nodes and selected the related nodes to construct the subgraphs. While these subgraph-based methods [17], [20] have achieved remarkable performance, there are still some problems, e.g. , the process of constructing subgraphs is usually relatively complex and would bring more computation, especially when there are many aspect terms. Hence, we attempt to integrate external knowledge from a different perspective.  \\n\\nIn this paper, we propose a novel knowledge graph augmented network, namely, KGAN, to integrate external knowledge for boosting the performance of ABSA task. In general, KGAN employs three parallel branches to learn the feature representations from multiple perspectives ( i.e. , context-, syntax- and knowledgebased). The contextual and syntactic branches are used to extract the explicit context and syntax information from the labeled ABSA data, respectively, as most existing ABSA models do. More specifically, in the knowledge branch, unlike the above previous methods that usually employ complicated approaches to encode the knowledge, we recast them with a simpler and more efficient strategy to incorporate the external knowledge. In practice, instead of directly operating on graph-structure data, we first integrate external knowledge graphs into low-dimensional continuous embeddings, which can be simply and efficiently used to represent sentences and aspects. 
Then, based on the knowledge embeddings, a soft attention mechanism is utilized to capture the aspect-specific knowledge representations. As a result, we can obtain multiple representations that establish the relations between aspects and opinion words from different views. To take full advantage of the complementarity of these multiview representations, we introduce a novel hierarchical fusion module to effectively fuse them.  \\n\\nWe conduct a comprehensive evaluation of KGAN on SemEval2014 ( i.e. , Laptop14 and Restaurant14), SemEval2015 ( i.e. ,Restaurant15), SemEval2016 ( i.e. , Restaurant16) and Twitter benchmarks. The experimental results show that KGAN achieves comparable performance compared to the prior SOTA model with the GloVe-based setting. Moreover, we also investigate and demonstrate the effectiveness and robustness of our KGAN in BERT- and RoBERTa-based settings. In particular, based on RoBERTa, our model achieves the SOTA performance among all datasets in terms of accuracy and macro-F1 score. More specifically, compared to the prior SOTA models, the accuracy improvements of KGAN on Twitter, Restaurant15 and Restaurant15 datasets are up to $2.49\\\\%$ ,$3.28\\\\%$ and $2.06\\\\%$ , respectively. Finally, we also compare KGAN with the other models in terms of latency and model size and prove that KGAN can achieve a good trade-off between efficiency and performance.  \\n\\nThe main contributions can be summarized as follows:  \\n\\n1) We propose a novel knowledge graph augmented network (KGAN), where different types of information are encoded as multiview representations to augment the semantic features, thus boosting the performance of ABSA.   \\n2) To achieve better complementarity between multiview features, we design a novel hierarchical fusion module to effectively fuse them.   \\n3) Experiments on several commonly used ABSA benchmarks show the effectiveness and universality of our proposed KGAN. In combination with pretrained models, i.e. 
,RoBERTa, we achieve new state-of-the-art performance on these benchmarks.  \\n\\nThe rest of this paper is organized as follows. In Sec. 2, we briefly review the related works. In Sec. 3, we introduce our proposed method in detail. Sec. 4 reports and discusses our experimental results. Lastly, we conclude our study in Sec. 5.',\n",
       " 'related_works': 'Related Works\\nS\\n\\n# 2.1 Aspect-based Sentiment Analysis\\nBenefiting from the representation learned from the training data, DNN-based ABSA models have shown promising performance compared to handcrafted feature-based models. We categorize them into two classes, e.g. , context- and syntax-based methods.  \\n\\nFirst, considering the easily obtained contextual information, using CNNs [6], [21], [22], [23], [24] and LSTMs [7], [16], [25], [26], [27], [28] to extract the aspect-specific feature representations from context has become the mainstream approach for ABSA. In particular, owing to the ability to learn sequential patterns, the target-dependent LSTM (TD-LSTM) was proposed by Tang et al. [25] to capture the aspect information. TD-LSTM simplifies connecting the aspect with all context words, neglecting the effect of relative opinion words. Therefore, Wang et al. [26] improved upon the TD-LSTM by introducing an attention mechanism to explore the potential correlations between aspects and opinion words. In the study of Ma et al. [27], two separate LSTMs were used to encode the context and aspect terms, and then an interactive attention mechanism was further proposed to extract the more relevant information between the context and aspect features.  \\n\\nOn the other hand, considering the complexity and inefficiency of LSTM-like sequential models, many studies have attempted to employ more efficient CNNs to capture the compositional structure and n-gram features. Xue and Li [21] proposed a gated convolution network to extract the contextual features and employed the gate mechanism to selectively output the final sentiment features. Huang and Carley [23] introduced two neural units, i.e. ,the parameterized filter and parameterized gate, to incorporate aspect information into CNN. 
Notably, in CNN-based methods, it is common to employ the average of aspect embeddings as the aspect representation, which would cause the loss of sequence information. To address this issue, Li et al. [6] introduced a targetspecific transformation component based on CNNs to better learn the target-specific representation.  \\n\\nHowever, due to the challenge of multiple aspects with different polarities in a sentence, context-based models usually confuse the connections between aspects and related opinion words. To this end, most recent efforts focus on leveraging the syntactic structure of the sentence to effectively establish the connection [9], [10], [11], [12], [29], [30], [31]. In practice, syntactic dependency trees are introduced to represent the sentence, and then GNNs are used to model the dependency trees and encode the syntactic information. Zhang et al. [9] first utilized dependency trees to represent sentences and then proposed graph convolution networks (GCNs) to exploit syntactical information from dependency trees. Additionally, to better connect the aspect and opinion words syntactically, Wang et al. [12] presented a novel aspect-oriented dependency tree structure and employed a relational graph attention network to encode the tree structure. In addition, regarding sentences that have no remarkable syntactic structure, Pang et al. [30] introduced a multichannel GCN to optimally fuse syntactic and semantic information and their combinations simultaneously. Similarly, in the study of Li et al. [11], a dual GCN model that consists of SemGCN and SynGCN modules was used to take advantage of the complementarity of syntax structure and semantic correlations.  \\n\\n  \\nFig. 2. The architecture of our proposed knowledge graph augmented network (KGAN), which leverages external knowledge graphs to augment contextual and syntactic information. 
The $R_{c}$ ,$R_{s}$ and $R_{k}$ denote the context- (left), syntax- (middle) and knowledge-based (right) representations, respectively. In the knowledge branch, ANALOGY and DistMult refer to the approaches of Knowledge Graph Embeddings (KGE). The GloVe/BERT/RoBERTa is used to convert the sentence/aspect into word embeddings.\\n\\n# 2.2 Incorporating External Knowledge\\nSince linguistic and commonsense knowledge can be beneficial to understanding natural language, incorporating this knowledge into deep learning models has become an active topic in many fields [13], [14], [32], [33], [34]. A case in point is the ERNIE [32], which employed the large-scale corpora and knowledge graphs to train a knowledge-enhanced pretraining language model. ERNIE experimentally achieves great performance on various knowledgedriven downstream tasks.  \\n\\nHowever, in the task of ABSA, the existing methods fall short in exploring the knowledge to augment the sentiment analysis. One main reason for this is that the above knowledge is not explicitly expressed in the ABSA datasets. Therefore, some recent studies attempt to incorporate external knowledge to alleviate this issue [16], [17], [18], [20], [35], [36], [37]. Wu et al. [35] proposed a unified model to integrate sentiment and structure knowledge with contextual representations for better performance. Zhou et al. [17] proposed jointly encoding syntactic information and external commonsense knowledge, where the knowledge was sampled via the individual nodes. Moreover, in the study of Xing et al. [34], a knowledge-enhanced BERT was introduced to obtain representations enhanced with sentiment domain knowledge to improve ABSA performance.  \\n\\nFollowing this line of research, we introduce knowledge graphs to explicitly provide external knowledge for ABSA. This idea is relatively similar to AR-BERT [20], which incorporates information on aspect-aspect relations in knowledge graphs to improve the performance of existing ABSA models. 
While ARBERT [20] can achieve encouraging performance with the help of a large-scale knowledge graph, its main focus is on modeling aspect relations (captured by a complex method) from large knowledge graphs. In contrast, we start from the multiview learning perspective and propose a novel ABSA model that uses a simpler and more efficient strategy to model knowledge graphs. Additionally, instead of only integrating external knowledge with contextual or syntactic information, we synergistically combine the knowledge with both contextual and syntactic information to obtain richer feature representations and effectively boost the performance of sentiment analysis.'}"
      ]
     },
     "execution_count": 65,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "rag_result[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "570b8ce18e58c68c",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T08:24:48.528626Z",
     "start_time": "2025-01-09T08:24:48.522695Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total number of characters in paper_id2chunks_list: 749077\n"
     ]
    }
   ],
   "source": [
    "# Count the total number of characters across all string-valued fields\n",
    "# of every paper-chunk dict returned by the RAG query (rag_result).\n",
    "total_chars = 0\n",
    "\n",
    "for paper_chunk in rag_result:\n",
    "    for value in paper_chunk.values():\n",
    "        # Only string fields (abstract/introduction/related_works/...)\n",
    "        # contribute; non-string metadata is skipped.\n",
    "        if isinstance(value, str):\n",
    "            total_chars += len(value)\n",
    "\n",
    "print(f\"Total number of characters in rag_result: {total_chars}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "29e579122b7ab495",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-09T09:02:30.091818Z",
     "start_time": "2025-01-09T09:02:30.082882Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "String 1 (length: 91154)\n",
      "String 2 (length: 82486)\n",
      "String 3 (length: 98072)\n",
      "String 4 (length: 95630)\n",
      "String 5 (length: 82110)\n",
      "String 6 (length: 88819)\n",
      "String 7 (length: 98685)\n",
      "String 8 (length: 96416)\n",
      "String 9 (length: 15320)\n"
     ]
    }
   ],
   "source": [
    "# Pack the per-paper contents into strings of at most ~100,000 characters\n",
    "# each, so every packed string fits into one LLM context window.\n",
    "related_paper_list = []  # final list of packed strings\n",
    "cur_content = \"\"  # string currently being accumulated\n",
    "\n",
    "for paper_id2chunks in rag_result:\n",
    "    # Concatenate one paper's fields into a single block of text.\n",
    "    content = (\n",
    "        f\"paper_title: {paper_id2chunks['paper_title']}\\n\"\n",
    "        f\"{paper_id2chunks['abstract']}\\n\"\n",
    "        f\"{paper_id2chunks['introduction']}\\n\"\n",
    "        f\"{paper_id2chunks['related_works']}\\n\"\n",
    "    )\n",
    "\n",
    "    # Flush once adding this paper would exceed the 100,000-character budget.\n",
    "    # The `cur_content` guard avoids appending an empty string when the very\n",
    "    # first paper is itself oversized.\n",
    "    if cur_content and len(cur_content) + len(content) > 100000:\n",
    "        related_paper_list.append(cur_content)\n",
    "        cur_content = content\n",
    "    else:\n",
    "        cur_content += content\n",
    "\n",
    "# Flush the final, partially filled string.\n",
    "if cur_content:\n",
    "    related_paper_list.append(cur_content)\n",
    "\n",
    "for i, string in enumerate(related_paper_list):\n",
    "    print(f\"String {i + 1} (length: {len(string)})\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "id": "6a7a81db4131b976",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T02:18:22.852007Z",
     "start_time": "2025-01-10T02:18:22.847552Z"
    }
   },
   "outputs": [],
   "source": [
    "# Prompt template for drafting a rough survey outline. The [PAPER LIST]\n",
    "# placeholder is substituted with a packed paper string before the LLM call.\n",
    "ROUGH_OUTLINE_PROMPT = '''\n",
    "You want to write an overall and comprehensive academic survey about \"Aspect Based Sentiment Analysis\".\\n\\\n",
    "You are provided with a list of papers related to the topic below:\\n\\\n",
    "---\n",
    "[PAPER LIST]\n",
    "---\n",
    "\n",
    "You need to draft an outline based on the given papers.\n",
    "The outline should contain a title and several sections.\n",
    "Each section is followed by a brief sentence describing what to write in that section.\n",
    "The outline is supposed to be comprehensive and contain 5 sections.\n",
    "\n",
    "Return in the format:\n",
    "<format>\n",
    "Title: [TITLE OF THE SURVEY]\n",
    "Section 1: [NAME OF SECTION 1]\n",
    "Description 1: [DESCRIPTION OF SECTION 1]\n",
    "\n",
    "Section 2: [NAME OF SECTION 2]\n",
    "Description 2: [DESCRIPTION OF SECTION 2]\n",
    "\n",
    "...\n",
    "\n",
    "Section K: [NAME OF SECTION K]\n",
    "Description K: [DESCRIPTION OF SECTION K]\n",
    "</format>\n",
    "The outline:\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "id": "759cc89eb1783a18",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T02:42:02.879917Z",
     "start_time": "2025-01-10T02:18:46.286538Z"
    }
   },
   "outputs": [
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[74], line 20\u001b[0m\n\u001b[0;32m      8\u001b[0m data \u001b[38;5;241m=\u001b[39m {\n\u001b[0;32m      9\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mqwen2.5-14b-4bit\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m     10\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m: [\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m     16\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstream\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[0;32m     17\u001b[0m }\n\u001b[0;32m     19\u001b[0m \u001b[38;5;66;03m# 发送POST请求\u001b[39;00m\n\u001b[1;32m---> 20\u001b[0m response \u001b[38;5;241m=\u001b[39m requests\u001b[38;5;241m.\u001b[39mpost(url, json\u001b[38;5;241m=\u001b[39mdata)\n\u001b[0;32m     22\u001b[0m \u001b[38;5;66;03m# 检查响应状态码\u001b[39;00m\n\u001b[0;32m     23\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mstatus_code \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m200\u001b[39m:\n\u001b[0;32m     24\u001b[0m     \u001b[38;5;66;03m# 解析响应内容\u001b[39;00m\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\requests\\api.py:115\u001b[0m, in \u001b[0;36mpost\u001b[1;34m(url, data, json, **kwargs)\u001b[0m\n\u001b[0;32m    103\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mpost\u001b[39m(url, data\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, json\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m    104\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124mr\u001b[39m\u001b[38;5;124;03m\"\"\"Sends a POST request.\u001b[39;00m\n\u001b[0;32m    105\u001b[0m \n\u001b[0;32m    106\u001b[0m \u001b[38;5;124;03m    :param url: URL for the new :class:`Request` object.\u001b[39;00m\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    112\u001b[0m \u001b[38;5;124;03m    :rtype: requests.Response\u001b[39;00m\n\u001b[0;32m    113\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m--> 115\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m request(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mpost\u001b[39m\u001b[38;5;124m\"\u001b[39m, url, data\u001b[38;5;241m=\u001b[39mdata, json\u001b[38;5;241m=\u001b[39mjson, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\requests\\api.py:59\u001b[0m, in \u001b[0;36mrequest\u001b[1;34m(method, url, **kwargs)\u001b[0m\n\u001b[0;32m     55\u001b[0m \u001b[38;5;66;03m# By using the 'with' statement we are sure the session is closed, thus we\u001b[39;00m\n\u001b[0;32m     56\u001b[0m \u001b[38;5;66;03m# avoid leaving sockets open which can trigger a ResourceWarning in some\u001b[39;00m\n\u001b[0;32m     57\u001b[0m \u001b[38;5;66;03m# cases, and look like a memory leak in others.\u001b[39;00m\n\u001b[0;32m     58\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m sessions\u001b[38;5;241m.\u001b[39mSession() \u001b[38;5;28;01mas\u001b[39;00m session:\n\u001b[1;32m---> 59\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m session\u001b[38;5;241m.\u001b[39mrequest(method\u001b[38;5;241m=\u001b[39mmethod, url\u001b[38;5;241m=\u001b[39murl, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\requests\\sessions.py:589\u001b[0m, in \u001b[0;36mSession.request\u001b[1;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[0m\n\u001b[0;32m    584\u001b[0m send_kwargs \u001b[38;5;241m=\u001b[39m {\n\u001b[0;32m    585\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtimeout\u001b[39m\u001b[38;5;124m\"\u001b[39m: timeout,\n\u001b[0;32m    586\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mallow_redirects\u001b[39m\u001b[38;5;124m\"\u001b[39m: allow_redirects,\n\u001b[0;32m    587\u001b[0m }\n\u001b[0;32m    588\u001b[0m send_kwargs\u001b[38;5;241m.\u001b[39mupdate(settings)\n\u001b[1;32m--> 589\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msend(prep, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39msend_kwargs)\n\u001b[0;32m    591\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m resp\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\requests\\sessions.py:703\u001b[0m, in \u001b[0;36mSession.send\u001b[1;34m(self, request, **kwargs)\u001b[0m\n\u001b[0;32m    700\u001b[0m start \u001b[38;5;241m=\u001b[39m preferred_clock()\n\u001b[0;32m    702\u001b[0m \u001b[38;5;66;03m# Send the request\u001b[39;00m\n\u001b[1;32m--> 703\u001b[0m r \u001b[38;5;241m=\u001b[39m adapter\u001b[38;5;241m.\u001b[39msend(request, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m    705\u001b[0m \u001b[38;5;66;03m# Total elapsed time of the request (approximately)\u001b[39;00m\n\u001b[0;32m    706\u001b[0m elapsed \u001b[38;5;241m=\u001b[39m preferred_clock() \u001b[38;5;241m-\u001b[39m start\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\requests\\adapters.py:667\u001b[0m, in \u001b[0;36mHTTPAdapter.send\u001b[1;34m(self, request, stream, timeout, verify, cert, proxies)\u001b[0m\n\u001b[0;32m    664\u001b[0m     timeout \u001b[38;5;241m=\u001b[39m TimeoutSauce(connect\u001b[38;5;241m=\u001b[39mtimeout, read\u001b[38;5;241m=\u001b[39mtimeout)\n\u001b[0;32m    666\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 667\u001b[0m     resp \u001b[38;5;241m=\u001b[39m conn\u001b[38;5;241m.\u001b[39murlopen(\n\u001b[0;32m    668\u001b[0m         method\u001b[38;5;241m=\u001b[39mrequest\u001b[38;5;241m.\u001b[39mmethod,\n\u001b[0;32m    669\u001b[0m         url\u001b[38;5;241m=\u001b[39murl,\n\u001b[0;32m    670\u001b[0m         body\u001b[38;5;241m=\u001b[39mrequest\u001b[38;5;241m.\u001b[39mbody,\n\u001b[0;32m    671\u001b[0m         headers\u001b[38;5;241m=\u001b[39mrequest\u001b[38;5;241m.\u001b[39mheaders,\n\u001b[0;32m    672\u001b[0m         redirect\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m    673\u001b[0m         assert_same_host\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m    674\u001b[0m         preload_content\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m    675\u001b[0m         decode_content\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[0;32m    676\u001b[0m         retries\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmax_retries,\n\u001b[0;32m    677\u001b[0m         timeout\u001b[38;5;241m=\u001b[39mtimeout,\n\u001b[0;32m    678\u001b[0m         chunked\u001b[38;5;241m=\u001b[39mchunked,\n\u001b[0;32m    679\u001b[0m     )\n\u001b[0;32m    681\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (ProtocolError, \u001b[38;5;167;01mOSError\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m err:\n\u001b[0;32m    682\u001b[0m     
\u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mConnectionError\u001b[39;00m(err, request\u001b[38;5;241m=\u001b[39mrequest)\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\urllib3\\connectionpool.py:789\u001b[0m, in \u001b[0;36mHTTPConnectionPool.urlopen\u001b[1;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)\u001b[0m\n\u001b[0;32m    786\u001b[0m response_conn \u001b[38;5;241m=\u001b[39m conn \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m release_conn \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m    788\u001b[0m \u001b[38;5;66;03m# Make the request on the HTTPConnection object\u001b[39;00m\n\u001b[1;32m--> 789\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_make_request(\n\u001b[0;32m    790\u001b[0m     conn,\n\u001b[0;32m    791\u001b[0m     method,\n\u001b[0;32m    792\u001b[0m     url,\n\u001b[0;32m    793\u001b[0m     timeout\u001b[38;5;241m=\u001b[39mtimeout_obj,\n\u001b[0;32m    794\u001b[0m     body\u001b[38;5;241m=\u001b[39mbody,\n\u001b[0;32m    795\u001b[0m     headers\u001b[38;5;241m=\u001b[39mheaders,\n\u001b[0;32m    796\u001b[0m     chunked\u001b[38;5;241m=\u001b[39mchunked,\n\u001b[0;32m    797\u001b[0m     retries\u001b[38;5;241m=\u001b[39mretries,\n\u001b[0;32m    798\u001b[0m     response_conn\u001b[38;5;241m=\u001b[39mresponse_conn,\n\u001b[0;32m    799\u001b[0m     preload_content\u001b[38;5;241m=\u001b[39mpreload_content,\n\u001b[0;32m    800\u001b[0m     decode_content\u001b[38;5;241m=\u001b[39mdecode_content,\n\u001b[0;32m    801\u001b[0m     \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mresponse_kw,\n\u001b[0;32m    802\u001b[0m )\n\u001b[0;32m    804\u001b[0m \u001b[38;5;66;03m# Everything went great!\u001b[39;00m\n\u001b[0;32m    805\u001b[0m clean_exit \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\urllib3\\connectionpool.py:536\u001b[0m, in \u001b[0;36mHTTPConnectionPool._make_request\u001b[1;34m(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)\u001b[0m\n\u001b[0;32m    534\u001b[0m \u001b[38;5;66;03m# Receive the response from the server\u001b[39;00m\n\u001b[0;32m    535\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 536\u001b[0m     response \u001b[38;5;241m=\u001b[39m conn\u001b[38;5;241m.\u001b[39mgetresponse()\n\u001b[0;32m    537\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (BaseSSLError, \u001b[38;5;167;01mOSError\u001b[39;00m) \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[0;32m    538\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_raise_timeout(err\u001b[38;5;241m=\u001b[39me, url\u001b[38;5;241m=\u001b[39murl, timeout_value\u001b[38;5;241m=\u001b[39mread_timeout)\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\site-packages\\urllib3\\connection.py:507\u001b[0m, in \u001b[0;36mHTTPConnection.getresponse\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    504\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mresponse\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m HTTPResponse\n\u001b[0;32m    506\u001b[0m \u001b[38;5;66;03m# Get the response from http.client.HTTPConnection\u001b[39;00m\n\u001b[1;32m--> 507\u001b[0m httplib_response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mgetresponse()\n\u001b[0;32m    509\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m    510\u001b[0m     assert_header_parsing(httplib_response\u001b[38;5;241m.\u001b[39mmsg)\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\http\\client.py:1428\u001b[0m, in \u001b[0;36mHTTPConnection.getresponse\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m   1426\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m   1427\u001b[0m     \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m-> 1428\u001b[0m         response\u001b[38;5;241m.\u001b[39mbegin()\n\u001b[0;32m   1429\u001b[0m     \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mConnectionError\u001b[39;00m:\n\u001b[0;32m   1430\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclose()\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\http\\client.py:331\u001b[0m, in \u001b[0;36mHTTPResponse.begin\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    329\u001b[0m \u001b[38;5;66;03m# read until we get a non-100 response\u001b[39;00m\n\u001b[0;32m    330\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[1;32m--> 331\u001b[0m     version, status, reason \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_read_status()\n\u001b[0;32m    332\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m status \u001b[38;5;241m!=\u001b[39m CONTINUE:\n\u001b[0;32m    333\u001b[0m         \u001b[38;5;28;01mbreak\u001b[39;00m\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\http\\client.py:292\u001b[0m, in \u001b[0;36mHTTPResponse._read_status\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    291\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_read_status\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m--> 292\u001b[0m     line \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mstr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfp\u001b[38;5;241m.\u001b[39mreadline(_MAXLINE \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m), \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124miso-8859-1\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m    293\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(line) \u001b[38;5;241m>\u001b[39m _MAXLINE:\n\u001b[0;32m    294\u001b[0m         \u001b[38;5;28;01mraise\u001b[39;00m LineTooLong(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstatus line\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\llm\\Lib\\socket.py:720\u001b[0m, in \u001b[0;36mSocketIO.readinto\u001b[1;34m(self, b)\u001b[0m\n\u001b[0;32m    718\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[0;32m    719\u001b[0m     \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 720\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sock\u001b[38;5;241m.\u001b[39mrecv_into(b)\n\u001b[0;32m    721\u001b[0m     \u001b[38;5;28;01mexcept\u001b[39;00m timeout:\n\u001b[0;32m    722\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_timeout_occurred \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import requests\n",
    "\n",
    "# URL of the local ollama chat service (replace with the actual API endpoint)\n",
    "url = \"http://localhost:11434/api/chat\"\n",
    "\n",
    "prompt = ROUGH_OUTLINE_PROMPT\n",
    "# Request payload: a single-turn, non-streaming chat completion\n",
    "data = {\n",
    "    \"model\": \"qwen2.5-14b-4bit\",\n",
    "    \"messages\": [\n",
    "        {\n",
    "            \"role\": \"user\",\n",
    "            \"content\": prompt\n",
    "        }\n",
    "    ],\n",
    "    \"stream\": False\n",
    "}\n",
    "\n",
    "# Send the POST request with an explicit timeout: the original call had\n",
    "# none, so a stalled server hung the kernel until a manual interrupt\n",
    "# (see the KeyboardInterrupt traceback in this cell's saved output).\n",
    "try:\n",
    "    response = requests.post(url, json=data, timeout=300)\n",
    "except requests.exceptions.RequestException as exc:\n",
    "    # Covers connection failures and timeouts without killing the kernel\n",
    "    print(f\"Request to ollama service failed: {exc}\")\n",
    "else:\n",
    "    if response.status_code == 200:\n",
    "        # Parse and display the JSON response body (used by later cells as `result`)\n",
    "        result = response.json()\n",
    "        print(\"Response from ollama service:\", result)\n",
    "    else:\n",
    "        print(f\"Failed to call ollama service. Status code: {response.status_code}\")\n",
    "        print(\"Response content:\", response.text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "id": "8f499b77088cdf59",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-10T01:59:44.944962Z",
     "start_time": "2025-01-10T01:59:44.938096Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'index': 0,\n",
       " 'content': '症状的发病部位是什么？',\n",
       " 'tokens': [],\n",
       " 'id_slot': 0,\n",
       " 'stop': True,\n",
       " 'model': 'gpt-3.5-turbo',\n",
       " 'tokens_predicted': 6,\n",
       " 'tokens_evaluated': 1,\n",
       " 'generation_settings': {'n_predict': 50,\n",
       "  'seed': 4294967295,\n",
       "  'temperature': 0.699999988079071,\n",
       "  'dynatemp_range': 0.0,\n",
       "  'dynatemp_exponent': 1.0,\n",
       "  'top_k': 40,\n",
       "  'top_p': 0.8999999761581421,\n",
       "  'min_p': 0.05000000074505806,\n",
       "  'xtc_probability': 0.0,\n",
       "  'xtc_threshold': 0.10000000149011612,\n",
       "  'typical_p': 1.0,\n",
       "  'repeat_last_n': 64,\n",
       "  'repeat_penalty': 1.0,\n",
       "  'presence_penalty': 0.0,\n",
       "  'frequency_penalty': 0.0,\n",
       "  'dry_multiplier': 0.0,\n",
       "  'dry_base': 1.75,\n",
       "  'dry_allowed_length': 2,\n",
       "  'dry_penalty_last_n': 2048,\n",
       "  'dry_sequence_breakers': ['\\n', ':', '\"', '*'],\n",
       "  'mirostat': 0,\n",
       "  'mirostat_tau': 5.0,\n",
       "  'mirostat_eta': 0.10000000149011612,\n",
       "  'stop': ['\\n', '###'],\n",
       "  'max_tokens': 50,\n",
       "  'n_keep': 0,\n",
       "  'n_discard': 0,\n",
       "  'ignore_eos': False,\n",
       "  'stream': False,\n",
       "  'logit_bias': [],\n",
       "  'n_probs': 0,\n",
       "  'min_keep': 0,\n",
       "  'grammar': '',\n",
       "  'samplers': ['penalties',\n",
       "   'dry',\n",
       "   'top_k',\n",
       "   'typ_p',\n",
       "   'top_p',\n",
       "   'min_p',\n",
       "   'xtc',\n",
       "   'temperature'],\n",
       "  'speculative.n_max': 16,\n",
       "  'speculative.n_min': 5,\n",
       "  'speculative.p_min': 0.8999999761581421,\n",
       "  'timings_per_token': False,\n",
       "  'post_sampling_probs': False},\n",
       " 'prompt': 'hi',\n",
       " 'has_new_line': False,\n",
       " 'truncated': False,\n",
       " 'stop_type': 'word',\n",
       " 'stopping_word': '\\n',\n",
       " 'tokens_cached': 6,\n",
       " 'timings': {'prompt_n': 1,\n",
       "  'prompt_ms': 267.748,\n",
       "  'prompt_per_token_ms': 267.748,\n",
       "  'prompt_per_second': 3.7348551623168054,\n",
       "  'predicted_n': 6,\n",
       "  'predicted_ms': 256.769,\n",
       "  'predicted_per_token_ms': 42.79483333333334,\n",
       "  'predicted_per_second': 23.367306801054642}}"
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "79936e4637d5e417",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llm",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
