{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 知识库接口的类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "from pprint import pprint\n",
    "import json\n",
    "\n",
    "class Knowledge:\n",
    "    \"\"\"Thin HTTP client for the paper knowledge-base REST API.\n",
    "\n",
    "    Every public method issues a GET request against ``base_url`` and\n",
    "    returns the raw response body as text (a JSON string).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # NOTE(review): service address is hardcoded -- consider making it configurable\n",
    "        self.base_url = r'http://180.184.65.98:38880/atomgit/'\n",
    "\n",
    "    def _get(self, endpoint, params=None):\n",
    "        \"\"\"Issue a GET to base_url + endpoint and return the raw response text.\"\"\"\n",
    "        resp = requests.get(url=self.base_url + endpoint, params=params)\n",
    "        return resp.text\n",
    "\n",
    "    # Health check: database metadata\n",
    "    def test(self):\n",
    "        return self._get('metadata')\n",
    "\n",
    "    # Find article chunks whose content is similar to the query\n",
    "    def search_paper(self, params):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            params = {\n",
    "                \"query\": \"yolov11\",  # text to search for (required)\n",
    "                \"top_k\": 5,          # number of results (optional)\n",
    "            }\n",
    "        return distance, entity: paper_id, paper_title, chunk_id, chunk_text, original_filename\n",
    "        \"\"\"\n",
    "        return self._get('search_papers', params)\n",
    "\n",
    "    # Look up chunks by paper_id\n",
    "    def query_by_paper_id(self, params):\n",
    "        \"return paper_id, paper_title, chunk_id, chunk_text, original_filename\"\n",
    "        return self._get('query_by_paper_id', params)\n",
    "\n",
    "    # Look up by exact title\n",
    "    def query_by_title(self, params):\n",
    "        # BUG FIX: the original parsed the response with json.loads and rebound\n",
    "        # the name resp to the resulting dict/list, then returned resp.text --\n",
    "        # which raised AttributeError on every call. Return the raw response\n",
    "        # text like every other method.\n",
    "        return self._get('query_by_title', params)\n",
    "\n",
    "    # Look up papers whose title contains the given substring\n",
    "    def query_by_title_contain(self, params):\n",
    "        return self._get('query_by_title_contain', params)\n",
    "\n",
    "    # Look up chunks containing the given substring\n",
    "    def query_by_chunk_contain(self, params):\n",
    "        return self._get('query_by_chunk_contain', params)\n",
    "\n",
    "    # Find papers with similar titles\n",
    "    def query_by_title_like(self, params):\n",
    "        \"\"\"Return a list of papers whose titles are similar to the input.\"\"\"\n",
    "        return self._get('query_by_title_like', params)\n",
    "\n",
    "    # Find paper IDs and titles by keyword\n",
    "    def query_by_keyword(self, params):\n",
    "        return self._get('query_by_keyword', params)\n",
    "\n",
    "    # Fetch the whole text of a paper by its ID\n",
    "    def query_whole_text_by_id(self, params):\n",
    "        return self._get('query_whole_text_by_id', params)\n",
    "\n",
    "    # Fetch the whole text of a paper by its title\n",
    "    def query_whole_text_by_title(self, params):\n",
    "        return self._get('query_whole_text_by_title', params)\n",
    "\n",
    "    # Fetch a paper's keywords by its ID\n",
    "    def query_keywords_by_id(self, params):\n",
    "        return self._get('query_keywords_by_id', params)\n",
    "\n",
    "    # Fetch a paper's keywords by its title\n",
    "    def query_keywords_by_title(self, params):\n",
    "        return self._get('query_keywords_by_title', params)\n",
    "\n",
    "    # Keyword statistics for the whole database\n",
    "    def keywords_metadata(self):\n",
    "        return self._get('keywords_metadata')\n",
    "\n",
    "    # Paper metadata for papers whose title contains the given keyword\n",
    "    def query_paper_metadata_that_title_contain(self, params):\n",
    "        return self._get('query_paper_metadata_that_title_contain', params)\n",
    "\n",
    "    # Titles similar to the input title\n",
    "    def titles_like(self, params):\n",
    "        return self._get('titles_like', params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def ppr(result):\n",
    "    result = json.loads(result)\n",
    "    pprint(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the knowledge-base client used by the cells below\n",
    "db = Knowledge()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 各个接口测试"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1 查看数据库基本信息"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'chunk_num': 151816,\n",
      " 'paper_num': 17942,\n",
      " 'track_stats': {'total_track_nums': 49,\n",
      "                 'track_counts': {'Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db': 11539,\n",
      "                                  'Conf_Paper_Meta_Data_AAAI_2023_with_whole_text.db': 523,\n",
      "                                  'Conf_Paper_Meta_Data_ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics_with_whole_text.db': 2811,\n",
      "                                  'Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db': 4943,\n",
      "                                  'Conf_Paper_Meta_Data_CCS_2022_with_whole_text.db': 710,\n",
      "                                  'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db': 10804,\n",
      "                                  'Conf_Paper_Meta_Data_CVPR_2022_IEEE_Conference_on_Computer_Vision_and_Pattern_Recognition_with_whole_text.db': 1304,\n",
      "                                  'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db': 12242,\n",
      "                                  'Conf_Paper_Meta_Data_Crypto_2022_with_whole_text.db': 46,\n",
      "                                  'Conf_Paper_Meta_Data_Crypto_2023_with_whole_text.db': 261,\n",
      "                                  'Conf_Paper_Meta_Data_ECAI_2023_with_whole_text.db': 482,\n",
      "                                  'Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db': 13649,\n",
      "                                  'Conf_Paper_Meta_Data_ECCV_2022_European_Conference_on_Computer_Vision_with_whole_text.db': 4539,\n",
      "                                  'Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db': 3079,\n",
      "                                  'Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db': 4938,\n",
      "                                  'Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db': 10389,\n",
      "                                  'Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db': 13174,\n",
      "                                  'Conf_Paper_Meta_Data_ICLR_2022_International_Conference_on_Learning_Representation_with_whole_text.db': 3000,\n",
      "                                  'Conf_Paper_Meta_Data_ICLR_2023_with_whole_text.db': 7290,\n",
      "                                  'Conf_Paper_Meta_Data_ICML2024_with_whole_text.db': 5623,\n",
      "                                  'Conf_Paper_Meta_Data_ICML_2022_International_Conference_on_Machine_Learning_with_whole_text.db': 4336,\n",
      "                                  'Conf_Paper_Meta_Data_ICML_2023_with_whole_text.db': 139,\n",
      "                                  'Conf_Paper_Meta_Data_IJCAI2023_with_whole_text.db': 2188,\n",
      "                                  'Conf_Paper_Meta_Data_IJCAI2024_with_whole_text.db': 2583,\n",
      "                                  'Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db': 916,\n",
      "                                  'Conf_Paper_Meta_Data_ISSTA_2022_with_whole_text.db': 111,\n",
      "                                  'Conf_Paper_Meta_Data_KDD2023_with_whole_text.db': 1710,\n",
      "                                  'Conf_Paper_Meta_Data_MobiCom_2023_with_whole_text.db': 602,\n",
      "                                  'Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db': 12416,\n",
      "                                  'Conf_Paper_Meta_Data_NeurIPS_2023_with_whole_text.db': 950,\n",
      "                                  'Conf_Paper_Meta_Data_SIGIR2023_with_whole_text.db': 656,\n",
      "                                  'Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db': 1140,\n",
      "                                  'Conf_Paper_Meta_Data_SIGIR_2022_Special_Interest_Group_on_Information_Retrieval_with_whole_text.db': 528,\n",
      "                                  'Conf_Paper_Meta_Data_SIGMOD_2023_with_whole_text.db': 9,\n",
      "                                  'Conf_Paper_Meta_Data_SP_2022_with_whole_text.db': 397,\n",
      "                                  'Conf_Paper_Meta_Data_SP_2023_with_whole_text.db': 335,\n",
      "                                  'Conf_Paper_Meta_Data_STOC_2023_with_whole_text.db': 1167,\n",
      "                                  'Conf_Paper_Meta_Data_USENIX_Security_2022_with_whole_text.db': 668,\n",
      "                                  'Conf_Paper_Meta_Data_USENIX_Security_2023_with_whole_text.db': 460,\n",
      "                                  'Conf_Paper_Meta_Data_VLDB2023_with_whole_text.db': 111,\n",
      "                                  'Conf_Paper_Meta_Data_VLDB_2022_with_whole_text.db': 215,\n",
      "                                  'Conf_Paper_Meta_Data_WWW_2022_The_Web_Conference_with_whole_text.db': 770,\n",
      "                                  'Conf_Paper_Meta_Data_WWW_2023__with_whole_text.db': 1557,\n",
      "                                  'Conf_Paper_Meta_Data__STOC_2022_with_whole_text.db': 961,\n",
      "                                  'Journal_Paper_Meta_Data_Artificial_Intelligence_with_whole_text.db': 446,\n",
      "                                  'Journal_Paper_Meta_Data_IEEE_Transactions_on_Knowledge_and_Data_Engineering_with_whole_text.db': 901,\n",
      "                                  'Journal_Paper_Meta_Data_IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_with_whole_text.db': 3275,\n",
      "                                  'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db': 434,\n",
      "                                  'Journal_Paper_Meta_Data_Journal_of_Machine_Learning_Research_with_whole_text.db': 489}}}\n"
     ]
    }
   ],
   "source": [
    "result = db.test()\n",
    "ppr(result)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2 根据相似内容查找文章片段"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{\"id\":454849335631998344,\"distance\":0.4951184391975403,\"entity\":{\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":4,\"chunk_text\":\"# 4.4. Fine-tuning YOLO-World\\nIn this section, we further fine-tune YOLO-World for closeset object detection on the COCO dataset and LVIS dataset to demonstrate the effectiveness of the pre-training.  \\n\\nTable 5. Text Encoder in YOLO-World. We ablate different text encoders in YOLO-World through the zero-shot LVIS evaluation.   \\n\\n\\n<html><body><table><tr><td>TextEncoder</td><td>Frozen?</td><td>AP</td><td>APr APc</td><td>APf</td></tr><tr><td>BERT-base</td><td>Frozen</td><td>14.6</td><td>3.4 10.7</td><td>20.0</td></tr><tr><td>BERT-base</td><td>Fine-tune</td><td>18.3</td><td>6.6 14.6</td><td>23.6</td></tr><tr><td>CLIP-base</td><td>Frozen</td><td>22.4</td><td>14.5 20.1</td><td>26.0</td></tr><tr><td>CLIP-base</td><td>Fine-tune</td><td>19.3</td><td>8.6 15.7</td><td>24.8</td></tr></table></body></html>  \\n\\nExperimental Setup. We use the pre-trained weights to initialize YOLO-World for fine-tuning. All models are finetuned for 80 epochs with the AdamW optimizer and the initial learning rate is set to 0.0002. In addition, we fine-tune the CLIP text encoder with a learning factor of 0.01. For the LVIS dataset, we follow previous works [ 7 ,12 ,59 ] and finetune YOLO-World on the LVIS-base (common & frequent) and evaluate it on the LVIS-novel (rare).  \\n\\nCOCO Object Detection. We compare the pre-trained YOLO-World with previous YOLO detectors [ 19 ,22 ,48 ]in Tab. 6 .For fine-tuning YOLO-World on the COCO dataset, we remove the proposed RepVL-PAN for further acceleration considering that the vocabulary size of the COCO dataset is small. In Tab. 6 , it’s evident that our approach can achieve decent zero-shot performance on the COCO dataset, which indicates that YOLO-World has strong generalization ability. 
Moreover, YOLO-World after fine-tuning on the COCO train2017 demonstrates higher performance compared to previous methods trained from scratch.  \\n\\nTable 6. Comparison with YOLOs on COCO Object Detection. We fine-tune the YOLO-World on COCO train2017 and evaluate on COCO val2017 . The results of YOLOv7 [ 48 ] and YOLOv8 [ 19 ] are obtained from MMYOLO [ 3 ]. ‘O’, $\\\\mathbf{\\\\omega}^{\\\\ast}\\\\mathbf{G}^{\\\\ast}$ , and ‘C’ denote pertaining using Objects365, GoldG, and $\\\\mathrm{{CC3M}^{\\\\dag}}$ , respectively. The FPS is measured on one NVIDIA V100 w/ TensorRT.   \\n\\n\\n<html><body><table><tr><td>Method</td><td>Pre-train</td><td>AP</td><td>AP50</td><td>AP75</td><td>FPS</td></tr><tr><td colspan=\\\"6\\\">Trainingfromscratch.</td></tr><tr><td>YOLOv6-S [22]</td><td></td><td>43.7</td><td>60.8</td><td>47.0</td><td>442</td></tr><tr><td>YOLOv6-M [22]</td><td>x</td><td>48.4</td><td>65.7</td><td>52.7</td><td>277</td></tr><tr><td>YOLOv6-L [22]</td><td>x</td><td>50.7</td><td>68.1</td><td>54.8</td><td>166</td></tr><tr><td>YOLOv7-T [48]</td><td>x</td><td>37.5</td><td>55.8</td><td>40.2</td><td>404</td></tr><tr><td>YOLOv7-L [48]</td><td>x</td><td>50.9</td><td>69.3</td><td>55.3</td><td>182</td></tr><tr><td>YOLOv7-X[48]</td><td></td><td>52.6</td><td>70.6</td><td>57.3</td><td>131</td></tr><tr><td>YOLOv8-S [19]</td><td>x</td><td>44.4</td><td>61.2</td><td>48.1</td><td>386</td></tr><tr><td>YOLOv8-M [19]</td><td>x</td><td>50.5</td><td>67.3</td><td>55.0</td><td>238</td></tr><tr><td>YOLOv8-L [19]</td><td></td><td>52.9</td><td>69.9</td><td>57.7</td><td>159</td></tr><tr><td colspan=\\\"6\\\">Zero-shot 
transfer.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>37.6</td><td>52.3</td><td>40.7</td><td></td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>42.8</td><td>58.3</td><td>46.4</td><td></td></tr><tr><td>YOLO-World-L</td><td>O+G</td><td>44.4</td><td>59.8</td><td>48.3</td><td></td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>45.1</td><td>60.7</td><td>48.9</td><td></td></tr><tr><td colspan=\\\"6\\\">Fine-tunedw/RepVL-PAN.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>45.9</td><td>62.3</td><td>50.1</td><td></td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>51.2</td><td>68.1</td><td>55.9</td><td></td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>53.3</td><td>70.1</td><td>58.2</td><td></td></tr><tr><td colspan=\\\"6\\\">Fine-tunedw/oRepVL-PAN.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>45.7</td><td>62.3</td><td>49.9</td><td>373</td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>50.7</td><td>67.2</td><td>55.1</td><td>231</td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>53.3</td><td>70.3</td><td>58.1</td><td>156</td></tr></table></body></html>  \\n\\nLVIS Object Detection. In Tab. 7 , we evaluate the finetuning performance of YOLO-World on the standard LVIS dataset. Firstly, compared to the oracle YOLOv8s [ 19 ]trained on the full LVIS datasets, YOLO-World achieves significant improvements, especially for larger models, e.g ., YOLO-World-L outperforms YOLOv8-L by $7.2\\\\,\\\\textrm{A P}$ and $10.2~\\\\mathrm{AP}_{r}$ . The improvements can demonstrate the effectiveness of the proposed pre-training strategy for largevocabulary detection. Moreover, YOLO-World, as an efficient one-stage detector, outperforms previous state-of-theart two-stage methods [ 7 ,12 ,21 ,49 ,59 ] on the overall performance without extra designs, e.g ., learnable prompts [ 7 ]or region-based alginments [ 12 ].  
\\n\\n<html><body><table><tr><td>Method</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>ViLD [12]</td><td>27.8</td><td>16.7</td><td>26.5</td><td>34.2</td></tr><tr><td>RegionCLIP [58]</td><td>28.2</td><td>17.1</td><td></td><td></td></tr><tr><td>Detic [59]</td><td>26.8</td><td>17.8</td><td></td><td></td></tr><tr><td>FVLM [21]</td><td>24.2</td><td>18.6</td><td>-</td><td></td></tr><tr><td>DetPro [7]</td><td>28.4</td><td>20.8</td><td>27.8</td><td>32.4</td></tr><tr><td>BARON [49]</td><td>29.5</td><td>23.2</td><td>29.3</td><td>32.5</td></tr><tr><td>YOLOv8-S</td><td>19.4</td><td>7.4</td><td>17.4</td><td>27.0</td></tr><tr><td>YOLOv8-M</td><td>23.1</td><td>8.4</td><td>21.3</td><td>31.5</td></tr><tr><td>YOLOv8-L</td><td>26.9</td><td>10.2</td><td>25.4</td><td>35.8</td></tr><tr><td>YOLO-World-S</td><td>23.9</td><td>12.8</td><td>20.4</td><td>32.7</td></tr><tr><td>YOLO-World-M</td><td>28.8</td><td>15.9</td><td>24.6</td><td>39.0</td></tr><tr><td>YOLO-World-L</td><td>34.1</td><td>20.4</td><td>31.1</td><td>43.5</td></tr></table></body></html>\\n\\n# 4.5. Open-Vocabulary Instance Segmentation\\nIn this section, we further fine-tune YOLO-World for segmenting objects under the open-vocabulary setting, which can be termed open-vocabulary instance segmentation (OVIS). Previous methods [ 17 ] have explored OVIS with pseudo-labelling on novel objects. Differently, considering that YOLO-World has strong transfer and generalization capabilities, we directly fine-tune YOLO-World on a subset of data with mask annotations and evaluate the segmentation performance under large-vocabulary settings. 
Specifically, we benchmark open-vocabulary instance segmentation under two settings:  \\n\\n• (1) COCO to LVIS setting, we fine-tune YOLO-World on the COCO dataset (including 80 categories) with mask annotations, under which the models need to transfer from 80 categories to 1203 categories $\\\\langle80\\\\rightarrow1203\\\\rangle$ );   \\n• (2) LVIS-base to LVIS setting, we fine-tune YOLO-World on the LVIS-base (including 866 categories, common & frequent) with mask annotations, under which the models need to transfer from 866 categories to 1203 categories $\\\\langle866\\\\rightarrow1203\\\\rangle$ ).   \\nWe evaluate the fine-tuned models on the standard LVIS   \\nval2017 with 1203 categories, in which 337 rare cate  \\ngories are unseen and can be used to measure the open  \\nvocabulary performance.  \\n\\nResults. Tab. 8 shows the experimental results of extending YOLO-World for open-vocabulary instance segmentation. Specifically, we adopt two fine-tuning strategies: (1) only fine-tuning the segmentation head and (2) fine-tuning all modules. Under strategy (1), the fine-tuned YOLOWorld still retains the zero-shot capabilities acquired from the pre-training stage, allowing it to generalize to unseen categories without additional fine-tuning. Strategy (2) enables YOLO-World fit the LVIS dataset better, but it may result in the degradation of the zero-shot capabilities.  \\n\\nTab. 8 shows the comparisons of fine-tuning YOLOWorld with different settings (COCO or LVIS-base) and different strategies (fine-tuning seg. head or fine-tuning all). Firstly, fine-tuning on LVIS-base obtains better performance compared to that based on COCO. However, the ratios between AP and $\\\\mathrm{AP}_{r}$ $\\\\langle\\\\boldsymbol{\\\\mathrm{AP}}_{r}/\\\\boldsymbol{\\\\mathrm{AP}}\\\\rangle$ are nearly unchanged, e.g ., the ratios of YOLO-World on COCO and LVIS-base are $76.5\\\\%$ and $74.3\\\\%$ , respectively. 
Considering that the detector is frozen, we attribute the performance gap to the fact that the LVIS dataset provides more detailed and denser segmentation annotations, which are beneficial for learning the segmentation head. When fine-tuning all modules, YOLO-World obtains remarkable improvements on LVIS, e.g ., YOLO-World-L achieves 9.6 AP gain. However, the fine-tuning might degrade the open-vocabulary performance and lead to a 0.6 box $\\\\mathrm{AP}_{r}$ drop for YOLO-World-L.\\n\\n# 4.6. Visualizations\\nWe provide the visualization results of pre-trained YOLOWorld-L under three settings: (a) we perform zero-shot inference with LVIS categories; (b) we input the custom prompts with fine-grained categories with attributes; (c) referring detection. The visualizations also demonstrate that YOLO-World has a strong generalization ability for openvocabulary scenarios along with referring ability.  \\n\\nZero-shot Inference on LVIS. Fig. 5 shows the visualization results based on the LVIS categories which are generated by the pre-trained YOLO-World-L in a zero-shot manner. The pre-trained YOLO-World exhibits strong zeroshot transfer capabilities and is able to detect as many objects as possible within the image.  \\n\\nInference with User’s Vocabulary. In Fig. 6 , we explore the detection capabilities of YOLO-World with our defined categories. The visualization results demonstrate that the pre-trained YOLO-World-L also exhibits the capability for (1) fine-grained detection ( i.e ., detect the parts of one object) and (2) fine-grained classification ( i.e ., distinguish different sub-categories of objects.).  \\n\\nReferring Object Detection. In Fig. 7 , we leverage some descriptive (discriminative) noun phrases as input, e.g ., the standing person, to explore whether the model can locate regions or objects in the image that match our given input. 
The visualization results display the phrases and their corresponding bounding boxes, demonstrating that the pretrained YOLO-World has the referring or grounding capability. This ability can be attributed to the proposed pretraining strategy with large-scale training data.\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024}},{\"id\":454847525166381188,\"distance\":0.489339143037796,\"entity\":{\"paper_id\":\"642f8a3790e50fcafd4314f8\",\"paper_title\":\"Uncurated Image-Text Datasets: Shedding Light on Demographic Bias\",\"chunk_id\":6,\"chunk_text\":\"# B. YOLOv5 bias evaluation\\nWe evaluate YOLOv5 in terms of skin-tone bias to check whether the use of this model can be a contributing factor to the representation discrepancies in PHASE ${\\\\mathrm{o}}$ annotations. We detect people in MSCOCO and compare accuracy per skin-tone using [ 60 ] annotations. Results are reported as follows: darker skin-tone recall is 0 .49 , and lighter skintone recall is 0 .55 . This shows that there is, indeed, a difference in performance according to skin-tone. However, we believe that it is not as big as to justify the representation gap that was found in the GCC dataset and the conclusions of our analysis still stand.\\n\\n# C. CLIP evaluation results\\nWe report the results of the CLIP evaluation when balancing the number of samples per class and attribute. Table 8 compares CLIP performance in $\\\\mathbf{R}(\\\\omega k$ for $k\\\\ =\\\\ 1,5,10$ when using all the samples in the validation set (Unbalanced), and when using the same number of samples per class and attribute (Balanced). For the balanced results, we use the number of samples in the smallest class in each attribute. Results are reported as mean and standard deviation over 100 runs with different random samples per class.  \\n\\nTable 6. Classes in the validation set and classes labeled as unsafe by Stable Diffusion’s Safety Checker.   
\\n\\n\\n<html><body><table><tr><td>Attribute</td><td>Validation set (%)</td><td>Unsafe label (%)</td></tr><tr><td>age</td><td></td><td></td></tr><tr><td>baby</td><td>0.89</td><td>3.23</td></tr><tr><td>child</td><td>6.70</td><td>12.90</td></tr><tr><td>young</td><td>29.24</td><td>32.26</td></tr><tr><td>adult</td><td>32.70</td><td>32.26</td></tr><tr><td>senior</td><td>2.77</td><td>3.23</td></tr><tr><td>unsure</td><td>1.11</td><td>0.00</td></tr><tr><td>multiple</td><td>26.59</td><td>16.13</td></tr><tr><td>gender</td><td></td><td></td></tr><tr><td>man</td><td>42.26</td><td>35.48</td></tr><tr><td>woman</td><td>35.04</td><td>51.61</td></tr><tr><td>unsure</td><td>0.98</td><td>0.00</td></tr><tr><td>multiple</td><td>21.72</td><td>12.90</td></tr><tr><td>skin-tone (binary)</td><td></td><td></td></tr><tr><td>lighter</td><td>68.62</td><td>80.65</td></tr><tr><td>darker</td><td>6.89</td><td>3.23</td></tr><tr><td>unsure</td><td>5.66</td><td>3.23</td></tr><tr><td>multiple</td><td>18.83</td><td>12.90</td></tr><tr><td>ethnicity</td><td></td><td></td></tr><tr><td>Black</td><td>4.20</td><td>3.23</td></tr><tr><td>East Asian</td><td>1.26</td><td>0.00</td></tr><tr><td>Indian</td><td>1.95</td><td>3.23</td></tr><tr><td>Latino</td><td>0.61</td><td>0.00</td></tr><tr><td>MiddleEastern</td><td>0.35</td><td>0.00</td></tr><tr><td>Southeast Asian</td><td>0.35</td><td>0.00</td></tr><tr><td>White</td><td>48.35</td><td>64.52</td></tr><tr><td>unsure</td><td>5.52</td><td>3.23</td></tr><tr><td>multiple</td><td>37.41</td><td>25.81</td></tr></table></body></html>  \\n\\nCLIP with occlusions To better understand what about the image leads to differing performances, we occlude the detected bounding boxes and repeat the evaluation process. We find that masking people bounding boxes makes CLIP’s $\\\\mathbf{R}\\\\mathcal{@}1$ drop from about $30\\\\%$ to $8\\\\%$ for all the attributes, which means that relevant information is contained in person regions. 
Moreover, as shown in Table 5 , the conclusions are maintained, e.g . recall for man is higher than for woman ,which suggests that part of the bias is from the language.\\n\\n# D. Stable Diffusion results\\nTable 6 reports the statistics of Stable Diffusion’s Safety Checker per attribute and class. We compare the percentage of samples per class in the image annotations, with the percentage of samples per class labeled as unsafe by the Safety Checker. It stands out that the class woman raises $51.61\\\\%$ of the unsafe labels whereas only accounts for $35.04\\\\%$ of the original images. Lighter skin-tone and White ethnicity also show big increases in the percentage of samples raised as unsafe, but differently from the woman class, both of them are the predominant class in their respective attributes.  \\n\\nTable 7. Statistics of annotations in PHASE ${\\\\mathrm{o}}$ per attribute and class. Annotations reports the raw number of annotations per class. Regions is the total number of region-level annotations after majority voting. Images accounts for the number of images with at least one regionlevel annotation with the class. Due to the inter-annotator agreement results, skin-tone region-level annotations are conducted for binary skin-tone only. For each attribute, the most common class is highlighted in bold and the unsure class in italics .  
\\n\\n\\n<html><body><table><tr><td>Attribute</td><td>Annotations</td><td>Regions</td><td>Images</td><td>Attribute</td><td>Annotations</td><td>Regions</td><td>Images</td></tr><tr><td>age</td><td>106, 041</td><td>35,347</td><td>18,889</td><td>skin-tone type</td><td>105,801</td><td></td><td></td></tr><tr><td>baby</td><td>955</td><td>306</td><td>259</td><td>type 1</td><td>15,388</td><td></td><td></td></tr><tr><td>child</td><td>7,829</td><td>2,578</td><td>1,569</td><td>type 2</td><td>49,821</td><td></td><td></td></tr><tr><td>young adult</td><td>40,398</td><td>13,313</td><td>8,841</td><td>type 3</td><td>18,083</td><td></td><td></td></tr><tr><td>adult</td><td>48,604</td><td>16,117</td><td>10,631</td><td>type 4</td><td>8,219</td><td></td><td></td></tr><tr><td>senior</td><td>4,632</td><td>1,375</td><td>1,152</td><td>type 5</td><td>5,771</td><td></td><td></td></tr><tr><td>unsure</td><td>3,623</td><td>653</td><td>525</td><td>type 6</td><td>4,570</td><td></td><td></td></tr><tr><td>gender</td><td>106, 041</td><td>35,347</td><td>18,889</td><td>unsure</td><td>3,949</td><td></td><td></td></tr><tr><td>man</td><td>67,122</td><td>22,491</td><td>13,511</td><td>skin-tone (binary)</td><td></td><td>35,347</td><td>18,889</td></tr><tr><td>woman</td><td>36,936</td><td>12,406</td><td>8,329</td><td>lighter</td><td></td><td>28,187</td><td>16,245</td></tr><tr><td>unsure</td><td>1,983</td><td>285</td><td>241</td><td>darker</td><td></td><td>5,572</td><td>3,838</td></tr><tr><td>ethnicity</td><td>105,801</td><td>35,347</td><td>18,889</td><td>unsure</td><td></td><td>922</td><td>730</td></tr><tr><td>Black</td><td>11, 314</td><td>3,664</td><td>2,657</td><td>activity</td><td>97,021</td><td>35,347</td><td>18,889</td></tr><tr><td>East Asian</td><td>3,957</td><td>953</td><td>707</td><td>caring</td><td>786</td><td>127</td><td>119</td></tr><tr><td>Indian</td><td>4, 
434</td><td>980</td><td>616</td><td>music</td><td>14,706</td><td>5,012</td><td>3,218</td></tr><tr><td>Latino</td><td>8,826</td><td>1,309</td><td>1,168</td><td>eating</td><td>1,012</td><td>278</td><td>206</td></tr><tr><td>Middle Eastern</td><td>5,513</td><td>373</td><td>349</td><td>household</td><td>180</td><td>19</td><td>18</td></tr><tr><td>Southeast Asian</td><td>2, 706</td><td>211</td><td>174</td><td>personal</td><td>291</td><td>39</td><td>33</td></tr><tr><td>White</td><td>63,253</td><td>22,098</td><td>13,698</td><td>posing</td><td>30,409</td><td>10,121</td><td>6,619</td></tr><tr><td>unsure</td><td>5,578</td><td>1,289</td><td>1,021</td><td>sports</td><td>19,933</td><td>6,725</td><td>3,807</td></tr><tr><td>emotion</td><td>100,248</td><td>35,347</td><td>18,889</td><td>transportation</td><td>811</td><td>224</td><td>181</td></tr><tr><td>happy</td><td>41, 059</td><td>12,603</td><td>8,215</td><td>work</td><td>5,043</td><td>1,433</td><td>891</td></tr><tr><td>sad</td><td>2,221</td><td>331</td><td>308</td><td>sports</td><td>19,933</td><td>6,725</td><td>3,807</td></tr><tr><td>fear</td><td>1,205</td><td>117</td><td>114</td><td>other</td><td>22,770</td><td>7,249</td><td>4,247</td></tr><tr><td>anger</td><td>2,391</td><td>377</td><td>346</td><td>unsure</td><td>1,080</td><td>164</td><td>149</td></tr><tr><td>neutral</td><td>47,367</td><td>16,646</td><td>10,473</td><td></td><td></td><td></td><td></td></tr><tr><td>unsure</td><td>6,005</td><td>1,663</td><td>1,224</td><td></td><td></td><td></td><td></td></tr></table></body></html>  \\n\\nTable 8. CLIP evaluation on PHASE ${\\\\bf\\\\xi})$ validation set. Results are reported as $\\\\mathbf{R}{\\\\mathcal{@}}k$ for $k=1,5,10$ .Unbalanced denotes when all the samples are used, resulting in highly unbalanced classes. Balanced denotes when using the same number of samples per class and attribute.   
\\n\\n\\n<html><body><table><tr><td></td><td></td><td colspan=\\\"4\\\">Unbalanced</td><td colspan=\\\"4\\\">Balanced</td></tr><tr><td>Attribute</td><td>Class</td><td>Size</td><td>R@1</td><td>R@5</td><td>R@10</td><td>Size</td><td>R@1</td><td>R@5</td><td>R@10</td></tr><tr><td>age</td><td>baby & child</td><td>350</td><td>44.0</td><td>65.4</td><td>74.0</td><td>128</td><td>44.0 ± 3.5</td><td>65.3 ± 3.4</td><td>73.9 ± 3.1</td></tr><tr><td></td><td>young</td><td>1, 349</td><td>30.4</td><td>51.3</td><td>60.9</td><td>128</td><td>29.8 ± 3.9</td><td>51.0 ± 4.5</td><td>60.8 ± 4.4</td></tr><tr><td></td><td>adult</td><td>1,509</td><td>27.3</td><td>46.7</td><td>55.9</td><td>128</td><td>27.5 ± 3.8</td><td>46.5 ± 4.2</td><td>55.4 ± 4.1</td></tr><tr><td></td><td>senior</td><td>128</td><td>44.5</td><td>64.1</td><td>71.1</td><td>128</td><td>44.5 ± 0.0</td><td>64.1 ± 0.0</td><td>71.1 ± 0.0</td></tr><tr><td>gender</td><td>man</td><td>1, 950</td><td>32.0</td><td>53.2</td><td>63.1</td><td>1,617</td><td>32.1 ± 0.4</td><td>53.2 ± 0.5</td><td>63.1 ± 0.4</td></tr><tr><td></td><td>woman</td><td>1,617</td><td>30.6</td><td>49.8</td><td>59.1</td><td>1,617</td><td>30.6 ± 0.0</td><td>49.8 ± 0.0</td><td>59.1 ± 0.0</td></tr><tr><td>skin-tone</td><td>lighter</td><td>3,166</td><td>30.2</td><td>50.6</td><td>59.9</td><td>318</td><td>30.1 ± 2.6</td><td>50.4 ± 2.8</td><td>60.1 ± 2.8</td></tr><tr><td></td><td>darker</td><td>318</td><td>31.1</td><td>54.1</td><td>62.3</td><td>318</td><td>31.1 ± 0.0</td><td>54.1 ± 0.0</td><td>62.3± 0.0</td></tr><tr><td>ethnicity</td><td>Black</td><td>194</td><td>29.4</td><td>51.5</td><td>58.8</td><td>16</td><td>29.1 ± 11.5</td><td>49.9 ± 11.2</td><td>57.7 ± 11.4</td></tr><tr><td></td><td>East Asian</td><td>58</td><td>34.8</td><td>56.9</td><td>63.8</td><td>16</td><td>33.5 ± 10.5</td><td>56.4 ± 10.5</td><td>64.2 ± 10.7</td></tr><tr><td></td><td>Indian</td><td>90</td><td>34.4</td><td>61.1</td><td>68.9</td><td>16</td><td>34.4 ± 12.3</td><td>61.9 ± 11.8</td><td>69.5 ± 
11.4</td></tr><tr><td></td><td>Latino</td><td>28</td><td>21.4</td><td>39.3</td><td>50.0</td><td>16</td><td>21.4 ± 6.2</td><td>37.9 ± 8.2</td><td>48.6 ± 8.6</td></tr><tr><td></td><td>Middle Eastern</td><td>16</td><td>31.3</td><td>62.5</td><td>75.0</td><td>16</td><td>31.3 ± 0.0</td><td>62.5 ± 0.0</td><td>75.0 ± 0.0</td></tr><tr><td></td><td>Southeast Asian</td><td>16</td><td>31.3</td><td>37.5</td><td>56.3</td><td>16</td><td>31.3 ± 0.0</td><td>37.5 ± 0.0</td><td>56.3 ± 0.0</td></tr><tr><td></td><td>White</td><td>2,231</td><td>30.6</td><td>50.6</td><td>59.5</td><td>16</td><td>31.2 ± 12.6</td><td>50.8 ± 12.3</td><td>59.3 ± 11.7</td></tr></table></body></html>\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db\",\"year\":2023}}]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "\"\\n[\\n    {\\n        'id': ,\\n        'distance': ,\\n        'entity':{\\n            'paper_id': ,\\n            'paper_title': ,\\n            'paper_chunk': ,\\n            'chunk_text': ,\\n            'original_filename': ,\\n            'year': ,\\n        }\\n    },\\n    {}\\n]\\n\""
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Semantic search: ask the knowledge base for the top_k chunks\n",
    "# whose content is most similar to the query text.\n",
    "params = dict(\n",
    "    query=\"yolov11\",\n",
    "    top_k=2,\n",
    ")\n",
    "result = db.search_paper(params)\n",
    "print(result)\n",
    "\n",
    "# Shape of each hit in the JSON printed above (values elided):\n",
    "\"\"\"\n",
    "[\n",
    "    {\n",
    "        'id': ,\n",
    "        'distance': ,\n",
    "        'entity':{\n",
    "            'paper_id': ,\n",
    "            'paper_title': ,\n",
    "            'paper_chunk': ,\n",
    "            'chunk_text': ,\n",
    "            'original_filename': ,\n",
    "            'year': ,\n",
    "        }\n",
    "    },\n",
    "    {}\n",
    "]\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3 根据paper_id查找"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[{\"id\":454849335381650816,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":0,\"chunk_text\":\"# YOLO-World: Real-Time Open-Vocabulary Object Detection\\nTianheng Cheng 3 ,,∗, Lin $\\\\mathrm{Song^{1,*,\\\\infty}}$ , Yixiao $\\\\mathrm{Ge^{1,2,\\\\dag}}$ , Wenyu Liu 3 , Xinggang Wang 3 ,, Ying Shan 1 ,∗equal contribution †project lead ✉corresponding author  \\n\\n1 Tencent AI Lab 2 ARC Lab, Tencent PCG 3 School of EIC, Huazhong University of Science & Technology  \\n\\nCode & Models: YOLO-World\\n\\n# Abstract\\nThe You Only Look Once (YOLO) series of detectors have established themselves as efficient and practical tools. However, their reliance on predefined and trained object categories limits their applicability in open scenarios. Addressing this limitation, we introduce YOLO-World, an innovative approach that enhances YOLO with openvocabulary detection capabilities through vision-language modeling and pre-training on large-scale datasets. Specifically, we propose a new Re-parameterizable VisionLanguage Path Aggregation Network (RepVL-PAN) and region-text contrastive loss to facilitate the interaction between visual and linguistic information. Our method excels in detecting a wide range of objects in a zero-shot manner with high efficiency. On the challenging LVIS dataset, YOLO-World achieves 35.4 AP with 52.0 FPS on V100, which outperforms many state-of-the-art methods in terms of both accuracy and speed. Furthermore, the fine-tuned YOLO-World achieves remarkable performance on several downstream tasks, including object detection and openvocabulary instance segmentation.\\n\\n# 1. Introduction\\nObject detection has been a long-standing and fundamental challenge in computer vision with numerous applications in image understanding, robotics, and autonomous vehicles. 
Tremendous works [ 15 ,26 ,40 ,42 ] have achieved significant breakthroughs in object detection with the development of deep neural networks. Despite the success of these methods, they remain limited as they only handle object detection with a fixed vocabulary, e.g ., 80 categories in the COCO [ 25 ] dataset. Once object categories are defined and labeled, trained detectors can only detect those specific categories, thus limiting the ability and applicability of open  \\n\\n  \\nFigure 1. Speed-and-Accuracy Curve. We compare YOLOWorld with recent open-vocabulary methods in terms of speed and accuracy. All models are evaluated on the LVIS minival and inference speeds are measured on one NVIDIA ${\\\\mathrm{V}}100\\\\,{\\\\mathrm{w}}/{\\\\mathrm{o}}$ TensorRT. The size of the circle represents the model’s size.  \\n\\nscenarios.  \\n\\nRecent works [ 7 ,12 ,49 ,54 ?]have explored the prevalent vision-language models [ 18 ,36 ] to address openvocabulary detection [ 54 ] through distilling vocabulary knowledge from language encoders, e.g ., BERT [ 5 ]. However, these distillation-based methods are much limited due to the scarcity of training data with a limited diversity of vocabulary, e.g ., OV-COCO [ 54 ] containing 48 base categories. Several methods [ 23 ,29 ,52 ,53 ,55 ] reformulate object detection training as region-level vision-language pretraining and train open-vocabulary object detectors at scale. However, those methods still struggle for detection in realworld scenarios, which suffer from two aspects: (1) heavy computation burden and (2) complicated deployment for edge devices. Previous works [ 23 ,29 ,52 ,53 ,55 ] have demonstrated the promising performance of pre-training large detectors while pre-training small detectors to endow them with open recognition capabilities remains unexplored.  
\\n\\nIn this paper, we present YOLO-World, aiming for high-efficiency open-vocabulary object detection, and explore large-scale pre-training schemes to boost the traditional YOLO detectors to a new open-vocabulary world. Compared to previous methods, the proposed YOLOWorld is remarkably efficient with high inference speed and easy to deploy for downstream applications. Specifically, YOLO-World follows the standard YOLO architecture [ 19 ] and leverages the pre-trained CLIP [ 36 ] text encoder to encode the input texts. We further propose the Re-parameterizable Vision-Language Path Aggregation Network (RepVL-PAN) to connect text features and image features for better visual-semantic representation. During inference, the text encoder can be removed and the text embeddings can be re-parameterized into weights of RepVL-PAN for efficient deployment. We further investigate the open-vocabulary pre-training scheme for YOLO detectors through region-text contrastive learning on largescale datasets, which unifies detection data, grounding data, and image-text data into region-text pairs. The pre-trained YOLO-World with abundant region-text pairs demonstrates a strong capability for large vocabulary detection and training more data leads to greater improvements in openvocabulary capability.  \\n\\nIn addition, we explore a prompt-then-detect paradigm to further improve the efficiency of open-vocabulary object detection in real-world scenarios. As illustrated in Fig. 2 ,traditional object detectors [ 15 ,19 ,22 ,38 –40 ,48 ] concentrate on the fixed-vocabulary (close-set) detection with predefined and trained categories. While previous openvocabulary detectors [ 23 ,29 ,52 ,55 ] encode the prompts of a user for online vocabulary with text encoders and detect objects. Notably, those methods tend to employ large detectors with heavy backbones, e.g ., Swin-L [ 31 ], to increase the open-vocabulary capacity. In contrast, the prompt-thendetect paradigm (Fig. 
2 (c)) first encodes the prompts of a user to build an offline vocabulary and the vocabulary varies with different needs. Then, the efficient detector can infer the offline vocabulary on the fly without re-encoding the prompts. For practical applications, once we have trained the detector, i.e ., YOLO-World, we can pre-encode the prompts or categories to build an offline vocabulary and then seamlessly integrate it into the detector.  \\n\\nOur main contributions can be summarized into three folds:  \\n\\n• We introduce the YOLO-World, a cutting-edge openvocabulary object detector with high efficiency for realworld applications. • We propose a Re-parameterizable Vision-Language PAN to connect vision and language features and an openvocabulary region-text contrastive pre-training scheme for YOLO-World.  \\n\\n• The proposed YOLO-World pre-trained on large-scale datasets demonstrates strong zero-shot performance and achieves $35.4\\\\,\\\\mathrm{AP}$ on LVIS with 52.0 FPS. The pre-trained YOLO-World can be easily adapted to downstream tasks, e.g ., open-vocabulary instance segmentation and referring object detection. Moreover, the pre-trained weights and codes of YOLO-World will be open-sourced to facilitate more practical applications.\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024},{\"id\":454849335441681794,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":1,\"chunk_text\":\"# 2. Related Works\\n\\n# 2.1. Traditional Object Detection\\nPrevalent object detection research concentrates on fixedvocabulary (close-set) detection, in which object detectors are trained on datasets with pre-defined categories, e.g ., COCO dataset [ 25 ] and Objects365 dataset [ 43 ], and then detect objects within the fixed set of categories. 
During the past decades, the methods for traditional object detection can be simply categorized into three groups, i.e ., region-based methods, pixel-based methods, and querybased methods. The region-based methods [ 10 ,11 ,15 ,26 ,41 ], such as Faster R-CNN [ 41 ], adopt a two-stage framework for proposal generation [ 41 ] and RoI-wise (Regionof-Interest) classification and regression. The pixel-based methods [ 27 ,30 ,39 ,45 ,57 ] tend to be one-stage detectors, which perform classification and regression over predefined anchors or pixels. DETR [ 1 ] first explores object detection through transformers [ 46 ] and inspires extensive query-based methods [ 60 ]. In terms of inference speed, Redmon et al . presents YOLOs [ 37 –39 ] which exploit simple convolutional architectures for real-time object detection. Several works [ 9 ,22 ,32 ,48 ,51 ] propose various architectures or designs for YOLO, including path aggregation networks [ 28 ], cross-stage partial networks [ 47 ], and re-parameterization [ 6 ], which further improve both speed and accuracy. In comparison to previous YOLOs, YOLOWorld in this paper aims to detect objects beyond the fixed vocabulary with strong generalization ability.\\n\\n# 2.2. Open-Vocabulary Object Detection\\nOpen-vocabulary object detection (OVD) [ 54 ] has emerged as a new trend for modern object detection, which aims to detect objects beyond the predefined categories. Early works [ 12 ] follow the standard OVD setting [ 54 ] by training detectors on the base classes and evaluating the novel (unknown) classes. Nevertheless, this open-vocabulary setting can evaluate the capability of detectors to detect and recognize novel objects, it is still limited for open scenarios and lacks generalization ability to other domains due to training on the limited dataset and vocabulary.  \\n\\n  \\nFigure 2. Comparison with Detection Paradigms. 
(a) Traditional Object Detector : These object detectors can only detect objects within the fixed vocabulary pre-defined by the training datasets, caption, noun phrases, category… ., 80 categories of COCO dataset [ 25 ]. The fixed vocabulary limits the extension for open scenes. (b) Previous Open-Vocabulary Detectors: Previous methods tend to develop large and heavy detectors for open-vocabulary detection which intuitively have strong capacity. In addition, these detectors simultaneously encode images and texts as input for prediction, which is time-consuming for practical applications. (c) YOLO-World: We demonstrate the strong open-vocabulary performance of lightweight detectors, e.g ., YOLO detectors [ 19 ,39 ], which is of great significance for real-world applications. Rather than using online vocabulary, we present a prompt-then-detect paradigm for efficient inference, in which the user generates a series of prompts according to the need and the prompts will be encoded into an offline vocabulary. Then it can be re-parameterized as the model weights for deployment and further acceleration.  \\n\\n  \\nFigure 3. Overall Architecture of YOLO-World. Compared to traditional YOLO detectors, YOLO-World as an open-vocabulary detector adopts text as input. The Text Encoder first encodes the input text input text embeddings. Then the Image Encoder encodes the input image into multi-scale image features and the proposed RepVL-PAN exploits the multi-level cross-modality fusion for both image and text features. Finally, YOLO-World predicts the regressed bounding boxes and the object embeddings for matching the categories or nouns that appeared in the input text.  \\n\\nInspired by vision-language pre-training [ 18 ,36 ], recent works [ 7 ,21 ,49 ,58 ,59 ] formulate open-vocabulary object detection as image-text matching and exploit largescale image-text data to increase the training vocabulary at scale. 
GLIP [ 23 ] presents a pre-training framework for open-vocabulary detection based on phrase grounding and evaluates in a zero-shot setting. Grounding DINO [ 29 ]incorporates the grounded pre-training [ 23 ] into detection transformers [ 56 ] with cross-modality fusions.  \\n\\nSeveral methods [ 24 ,52 ,53 ,55 ] unify detection datasets and image-text datasets through region-text matching and pre-train detectors with large-scale image-text pairs, achieving promising performance and generalization. However, these methods often use heavy detectors like ATSS [ 57 ]or DINO [ 56 ] with Swin-L [ 31 ] as a backbone, leading to high computational demands and deployment challenges. In contrast, we present YOLO-World, aiming for efficient open-vocabulary object detection with real-time inference and easier downstream application deployment. Differing from ZSD-YOLO [ 50 ], which also explores openvocabulary detection [ 54 ] with YOLO through language model alignment, YOLO-World introduces a novel YOLO framework with an effective pre-training strategy, enhancing open-vocabulary performance and generalization.\\n\\n# 3. Method\\n\\n# 3.1. Pre-training Formulation: Region-Text Pairs\\nThe traditional object detection methods, including the YOLO-series [ 19 ], are trained with instance annotations $\\\\Omega\\\\,=\\\\,\\\\{B_{i},c_{i}\\\\}_{i=1}^{N}$ and category labels , which consist of bounding boxes $\\\\{c_{i}\\\\}$ . In this paper, w $\\\\{B_{i}\\\\}$ ewhere instan $t_{i}$ annotations as region-text pairs is the corresponding text for the region $\\\\Omega\\\\,=\\\\,\\\\{B_{i},t_{i}\\\\}_{i=1}^{N}$ $B_{i}$ . Specif},ically, the text $t_{i}$ can be the category name, noun phrases, or object descriptions. Moreover, YOLO-World adopts both the image $I$ and texts $T$ (a set of nouns) as input and outputs predicted boxes dings $\\\\{e_{k}\\\\}$ $(e_{k}\\\\in\\\\mathbb{R}^{D})$ $\\\\{\\\\hat{B}_{k}\\\\}$ ). 
and the corresponding object embed\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024}]\n"
     ]
    }
   ],
   "source": [
    "# Fetch the stored chunks of one paper by its paper_id.\n",
    "params = dict(\n",
    "    paper_id=\"65b9ac94939a5f4082225348\",\n",
    "    top_k=2,\n",
    ")\n",
    "result = db.query_by_paper_id(params)\n",
    "print(result)\n",
    "# The paper's chunks are returned in order."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4 查找相似论文标题"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[{\"id\":454849335381650816,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":0,\"chunk_text\":\"# YOLO-World: Real-Time Open-Vocabulary Object Detection\\nTianheng Cheng 3 ,,∗, Lin $\\\\mathrm{Song^{1,*,\\\\infty}}$ , Yixiao $\\\\mathrm{Ge^{1,2,\\\\dag}}$ , Wenyu Liu 3 , Xinggang Wang 3 ,, Ying Shan 1 ,∗equal contribution †project lead ✉corresponding author  \\n\\n1 Tencent AI Lab 2 ARC Lab, Tencent PCG 3 School of EIC, Huazhong University of Science & Technology  \\n\\nCode & Models: YOLO-World\\n\\n# Abstract\\nThe You Only Look Once (YOLO) series of detectors have established themselves as efficient and practical tools. However, their reliance on predefined and trained object categories limits their applicability in open scenarios. Addressing this limitation, we introduce YOLO-World, an innovative approach that enhances YOLO with openvocabulary detection capabilities through vision-language modeling and pre-training on large-scale datasets. Specifically, we propose a new Re-parameterizable VisionLanguage Path Aggregation Network (RepVL-PAN) and region-text contrastive loss to facilitate the interaction between visual and linguistic information. Our method excels in detecting a wide range of objects in a zero-shot manner with high efficiency. On the challenging LVIS dataset, YOLO-World achieves 35.4 AP with 52.0 FPS on V100, which outperforms many state-of-the-art methods in terms of both accuracy and speed. Furthermore, the fine-tuned YOLO-World achieves remarkable performance on several downstream tasks, including object detection and openvocabulary instance segmentation.\\n\\n# 1. Introduction\\nObject detection has been a long-standing and fundamental challenge in computer vision with numerous applications in image understanding, robotics, and autonomous vehicles. 
Tremendous works [ 15 ,26 ,40 ,42 ] have achieved significant breakthroughs in object detection with the development of deep neural networks. Despite the success of these methods, they remain limited as they only handle object detection with a fixed vocabulary, e.g ., 80 categories in the COCO [ 25 ] dataset. Once object categories are defined and labeled, trained detectors can only detect those specific categories, thus limiting the ability and applicability of open  \\n\\n  \\nFigure 1. Speed-and-Accuracy Curve. We compare YOLOWorld with recent open-vocabulary methods in terms of speed and accuracy. All models are evaluated on the LVIS minival and inference speeds are measured on one NVIDIA ${\\\\mathrm{V}}100\\\\,{\\\\mathrm{w}}/{\\\\mathrm{o}}$ TensorRT. The size of the circle represents the model’s size.  \\n\\nscenarios.  \\n\\nRecent works [ 7 ,12 ,49 ,54 ?]have explored the prevalent vision-language models [ 18 ,36 ] to address openvocabulary detection [ 54 ] through distilling vocabulary knowledge from language encoders, e.g ., BERT [ 5 ]. However, these distillation-based methods are much limited due to the scarcity of training data with a limited diversity of vocabulary, e.g ., OV-COCO [ 54 ] containing 48 base categories. Several methods [ 23 ,29 ,52 ,53 ,55 ] reformulate object detection training as region-level vision-language pretraining and train open-vocabulary object detectors at scale. However, those methods still struggle for detection in realworld scenarios, which suffer from two aspects: (1) heavy computation burden and (2) complicated deployment for edge devices. Previous works [ 23 ,29 ,52 ,53 ,55 ] have demonstrated the promising performance of pre-training large detectors while pre-training small detectors to endow them with open recognition capabilities remains unexplored.  
\\n\\nIn this paper, we present YOLO-World, aiming for high-efficiency open-vocabulary object detection, and explore large-scale pre-training schemes to boost the traditional YOLO detectors to a new open-vocabulary world. Compared to previous methods, the proposed YOLOWorld is remarkably efficient with high inference speed and easy to deploy for downstream applications. Specifically, YOLO-World follows the standard YOLO architecture [ 19 ] and leverages the pre-trained CLIP [ 36 ] text encoder to encode the input texts. We further propose the Re-parameterizable Vision-Language Path Aggregation Network (RepVL-PAN) to connect text features and image features for better visual-semantic representation. During inference, the text encoder can be removed and the text embeddings can be re-parameterized into weights of RepVL-PAN for efficient deployment. We further investigate the open-vocabulary pre-training scheme for YOLO detectors through region-text contrastive learning on largescale datasets, which unifies detection data, grounding data, and image-text data into region-text pairs. The pre-trained YOLO-World with abundant region-text pairs demonstrates a strong capability for large vocabulary detection and training more data leads to greater improvements in openvocabulary capability.  \\n\\nIn addition, we explore a prompt-then-detect paradigm to further improve the efficiency of open-vocabulary object detection in real-world scenarios. As illustrated in Fig. 2 ,traditional object detectors [ 15 ,19 ,22 ,38 –40 ,48 ] concentrate on the fixed-vocabulary (close-set) detection with predefined and trained categories. While previous openvocabulary detectors [ 23 ,29 ,52 ,55 ] encode the prompts of a user for online vocabulary with text encoders and detect objects. Notably, those methods tend to employ large detectors with heavy backbones, e.g ., Swin-L [ 31 ], to increase the open-vocabulary capacity. In contrast, the prompt-thendetect paradigm (Fig. 
2 (c)) first encodes the prompts of a user to build an offline vocabulary and the vocabulary varies with different needs. Then, the efficient detector can infer the offline vocabulary on the fly without re-encoding the prompts. For practical applications, once we have trained the detector, i.e ., YOLO-World, we can pre-encode the prompts or categories to build an offline vocabulary and then seamlessly integrate it into the detector.  \\n\\nOur main contributions can be summarized into three folds:  \\n\\n• We introduce the YOLO-World, a cutting-edge openvocabulary object detector with high efficiency for realworld applications. • We propose a Re-parameterizable Vision-Language PAN to connect vision and language features and an openvocabulary region-text contrastive pre-training scheme for YOLO-World.  \\n\\n• The proposed YOLO-World pre-trained on large-scale datasets demonstrates strong zero-shot performance and achieves $35.4\\\\,\\\\mathrm{AP}$ on LVIS with 52.0 FPS. The pre-trained YOLO-World can be easily adapted to downstream tasks, e.g ., open-vocabulary instance segmentation and referring object detection. Moreover, the pre-trained weights and codes of YOLO-World will be open-sourced to facilitate more practical applications.\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024},{\"id\":454849335441681794,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":1,\"chunk_text\":\"# 2. Related Works\\n\\n# 2.1. Traditional Object Detection\\nPrevalent object detection research concentrates on fixedvocabulary (close-set) detection, in which object detectors are trained on datasets with pre-defined categories, e.g ., COCO dataset [ 25 ] and Objects365 dataset [ 43 ], and then detect objects within the fixed set of categories. 
During the past decades, the methods for traditional object detection can be simply categorized into three groups, i.e ., region-based methods, pixel-based methods, and querybased methods. The region-based methods [ 10 ,11 ,15 ,26 ,41 ], such as Faster R-CNN [ 41 ], adopt a two-stage framework for proposal generation [ 41 ] and RoI-wise (Regionof-Interest) classification and regression. The pixel-based methods [ 27 ,30 ,39 ,45 ,57 ] tend to be one-stage detectors, which perform classification and regression over predefined anchors or pixels. DETR [ 1 ] first explores object detection through transformers [ 46 ] and inspires extensive query-based methods [ 60 ]. In terms of inference speed, Redmon et al . presents YOLOs [ 37 –39 ] which exploit simple convolutional architectures for real-time object detection. Several works [ 9 ,22 ,32 ,48 ,51 ] propose various architectures or designs for YOLO, including path aggregation networks [ 28 ], cross-stage partial networks [ 47 ], and re-parameterization [ 6 ], which further improve both speed and accuracy. In comparison to previous YOLOs, YOLOWorld in this paper aims to detect objects beyond the fixed vocabulary with strong generalization ability.\\n\\n# 2.2. Open-Vocabulary Object Detection\\nOpen-vocabulary object detection (OVD) [ 54 ] has emerged as a new trend for modern object detection, which aims to detect objects beyond the predefined categories. Early works [ 12 ] follow the standard OVD setting [ 54 ] by training detectors on the base classes and evaluating the novel (unknown) classes. Nevertheless, this open-vocabulary setting can evaluate the capability of detectors to detect and recognize novel objects, it is still limited for open scenarios and lacks generalization ability to other domains due to training on the limited dataset and vocabulary.  \\n\\n  \\nFigure 2. Comparison with Detection Paradigms. 
(a) Traditional Object Detector : These object detectors can only detect objects within the fixed vocabulary pre-defined by the training datasets, caption, noun phrases, category… ., 80 categories of COCO dataset [ 25 ]. The fixed vocabulary limits the extension for open scenes. (b) Previous Open-Vocabulary Detectors: Previous methods tend to develop large and heavy detectors for open-vocabulary detection which intuitively have strong capacity. In addition, these detectors simultaneously encode images and texts as input for prediction, which is time-consuming for practical applications. (c) YOLO-World: We demonstrate the strong open-vocabulary performance of lightweight detectors, e.g ., YOLO detectors [ 19 ,39 ], which is of great significance for real-world applications. Rather than using online vocabulary, we present a prompt-then-detect paradigm for efficient inference, in which the user generates a series of prompts according to the need and the prompts will be encoded into an offline vocabulary. Then it can be re-parameterized as the model weights for deployment and further acceleration.  \\n\\n  \\nFigure 3. Overall Architecture of YOLO-World. Compared to traditional YOLO detectors, YOLO-World as an open-vocabulary detector adopts text as input. The Text Encoder first encodes the input text input text embeddings. Then the Image Encoder encodes the input image into multi-scale image features and the proposed RepVL-PAN exploits the multi-level cross-modality fusion for both image and text features. Finally, YOLO-World predicts the regressed bounding boxes and the object embeddings for matching the categories or nouns that appeared in the input text.  \\n\\nInspired by vision-language pre-training [ 18 ,36 ], recent works [ 7 ,21 ,49 ,58 ,59 ] formulate open-vocabulary object detection as image-text matching and exploit largescale image-text data to increase the training vocabulary at scale. 
GLIP [ 23 ] presents a pre-training framework for open-vocabulary detection based on phrase grounding and evaluates in a zero-shot setting. Grounding DINO [ 29 ]incorporates the grounded pre-training [ 23 ] into detection transformers [ 56 ] with cross-modality fusions.  \\n\\nSeveral methods [ 24 ,52 ,53 ,55 ] unify detection datasets and image-text datasets through region-text matching and pre-train detectors with large-scale image-text pairs, achieving promising performance and generalization. However, these methods often use heavy detectors like ATSS [ 57 ]or DINO [ 56 ] with Swin-L [ 31 ] as a backbone, leading to high computational demands and deployment challenges. In contrast, we present YOLO-World, aiming for efficient open-vocabulary object detection with real-time inference and easier downstream application deployment. Differing from ZSD-YOLO [ 50 ], which also explores openvocabulary detection [ 54 ] with YOLO through language model alignment, YOLO-World introduces a novel YOLO framework with an effective pre-training strategy, enhancing open-vocabulary performance and generalization.\\n\\n# 3. Method\\n\\n# 3.1. Pre-training Formulation: Region-Text Pairs\\nThe traditional object detection methods, including the YOLO-series [ 19 ], are trained with instance annotations $\\\\Omega\\\\,=\\\\,\\\\{B_{i},c_{i}\\\\}_{i=1}^{N}$ and category labels , which consist of bounding boxes $\\\\{c_{i}\\\\}$ . In this paper, w $\\\\{B_{i}\\\\}$ ewhere instan $t_{i}$ annotations as region-text pairs is the corresponding text for the region $\\\\Omega\\\\,=\\\\,\\\\{B_{i},t_{i}\\\\}_{i=1}^{N}$ $B_{i}$ . Specif},ically, the text $t_{i}$ can be the category name, noun phrases, or object descriptions. Moreover, YOLO-World adopts both the image $I$ and texts $T$ (a set of nouns) as input and outputs predicted boxes dings $\\\\{e_{k}\\\\}$ $(e_{k}\\\\in\\\\mathbb{R}^{D})$ $\\\\{\\\\hat{B}_{k}\\\\}$ ). 
and the corresponding object embed\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024},{\"id\":454849335500139908,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":2,\"chunk_text\":\"# 3.2. Model Architecture\\nThe overall architecture of the proposed YOLO-World is illustrated in Fig. 3 , which consists of a YOLO detector , a Text Encoder , and a $R e$ -parameterizable Vision-Language Path Aggregation Network (RepVL-PAN). Given the input text, the text encoder in YOLO-World encodes the text into text embeddings. The image encoder in the YOLO detector extracts the multi-scale features from the input image. Then we leverage the RepVL-PAN to enhance both text and image representation by exploiting the cross-modality fusion between image features and text embeddings.  \\n\\nYOLO Detector. YOLO-World is mainly developed based on YOLOv8 [ 19 ], which contains a Darknet backbone [ 19 ,40 ] as the image encoder, a path aggregation network (PAN) for multi-scale feature pyramids, and a head for bounding box regression and object embeddings.  \\n\\nText Encoder. Given the text $T$ , we adopt the Transformer text encoder pre-trained by CLIP [ 36 ] to extract the ponding xt embeddings $W\\\\!=\\\\!\\\\mathbb{T}\\\\!\\\\in\\\\!\\\\mathbb{X}^{\\\\cdot}$ tEn oder $(T)\\\\in$ $\\\\mathbb{R}^{C\\\\times D}$ , where Cis the number of nouns and Dis the embedding dimension. The CLIP text encoder offers better visual-semantic capabilities for connecting visual objects with texts compared to text-only language encoders [ 5 ]. When the input text is a caption or referring expression, we adopt the simple n-gram algorithm to extract the noun phrases and then feed them into the text encoder.  \\n\\nText Contrastive Head. Following previous works [ 19 ], we adopt the decoupled head with two $3\\\\!\\\\times\\\\!3$ convs to regress where bound Kdenotes the number of objects. 
We present a text boxes $\\\\{b_{k}\\\\}_{k=1}^{K}$ and object embeddings $\\\\{e_{k}\\\\}_{k=1}^{K}$ ,contrastive head to obtain the object-text similarity $s_{k,j}$ by:  \\n\\n$$\\n\\\\begin{array}{r}{s_{k,j}=\\\\alpha\\\\cdot\\\\mathtt{L2}\\\\mathrm{-Norm}(e_{k})\\\\cdot\\\\mathtt{L2}\\\\mathrm{-Norm}(w_{j})^{\\\\top}+\\\\beta,}\\\\end{array}\\n$$  \\n\\nwhere $\\\\mathtt{L2-N o r m}(\\\\cdot)$ is the L2 normalization and $w_{j}\\\\,\\\\in\\\\,W$ is the j-th text embeddings. In addition, we add the affine transformation with the learnable scaling factor $\\\\alpha$ and shifting factor $\\\\beta$ . Both the L2 norms and the affine transformations are important for stabilizing the region-text training.  \\n\\nTraining with Online Vocabulary. During training, we construct an online vocabulary $T$ for each mosaic sample containing 4 images. Specifically, we sample all positive nouns involved in the mosaic images and randomly sample some negative nouns from the corresponding dataset. The vocabulary for each mosaic sample contains at most $M$ nouns, and $M$ is set to 80 as default.  \\n\\nInference with Offline Vocabulary. At the inference stage, we present a prompt-then-detect strategy with an offline vocabulary for further efficiency. As shown in Fig. 3 ,the user can define a series of custom prompts, which might include captions or categories. We then utilize the text encoder to encode these prompts and obtain offline vocabulary embeddings. The offline vocabulary allows for avoiding computation for each input and provides the flexibility to adjust the vocabulary as needed.\\n\\n# 3.3. Re-parameterizable Vision-Language PAN\\nFig. 
4 shows the structure of the proposed RepVL-PAN which follows the top-down and bottom-up paths in [ 19 ,28 ]we propose the Text-guided CSPLayer (T-CSPLayer) and multi-scale image features to establish the feature pyramids $\\\\{C_{3},C_{4},C_{5}\\\\}$ $\\\\{P_{3},P_{4},P_{5}\\\\}$ .Furthermore, with the Image-Pooling Attention (I-Pooling Attention) to further enhance the interaction between image features and text features, which can improve the visual-semantic representation for open-vocabulary capability. During inference, the offline vocabulary embeddings can be re-parameterized into weights of convolutional or linear layers for deployment.  \\n\\nText-guided CSPLayer. As Fig. 4 illustrates, the crossstage partial layers (CSPLayer) are utilized after the topdown or bottom-up fusion. We extend the CSPLayer (also called C2f ) of [ 19 ] by incorporating text guidance into multi-scale image features to form the Text-guided CSPLayer. Specifically, given the text embeddings $W$ and image features $X_{l}\\\\,\\\\in\\\\,\\\\mathbb{R}^{H\\\\times W\\\\times D}$ $(l\\\\,\\\\in\\\\,\\\\{3,4,5\\\\})$ , we adopt the max-sigmoid attention after the last dark bottleneck block to aggregate text features into image features by:  \\n\\n$$\\nX_{l}^{\\\\prime}=X_{l}\\\\cdot\\\\delta\\\\big(\\\\operatorname*{max}_{j\\\\in\\\\{1..C\\\\}}(X_{l}W_{j}^{\\\\top})\\\\big)^{\\\\top},\\n$$  \\n\\nwhere the updated $X_{l}^{\\\\prime}$ is concatenated with the cross-stage features as output. The $\\\\delta$ indicates the sigmoid function.  \\n\\nImage-Pooling Attention. To enhance the text embeddings with image-aware information, we aggregate image features to update the text embeddings by proposing the Image-Pooling Attention. Rather than directly using crossattention on image features, we leverage max pooling on multi-scale features to $3\\\\times3$ regions, resulting in a are then updated by: total of 27 patch tokens $\\\\tilde{X}\\\\in\\\\mathbb{R}^{27\\\\times D}$ ∈. 
The text embeddings  \\n\\n  \\nFigure 4. Illustration of the RepVL-PAN. The proposed RepVLPAN adopts the Text-guided CSPLayer (T-CSPLayer) for injecting language information into image features and the Image Pooling Attention (I-Pooling Attention) for enhancing image-aware text embeddings.  \\n\\n$$\\nW^{\\\\prime}=W+\\\\mathbb{M}{\\\\tt u\\\\tt l t i d e a d-a t t e n t i o n}(W,\\\\tilde{X},\\\\tilde{X})\\n$$\\n\\n# 3.4. Pre-training Schemes\\nIn this section, we present the training schemes for pretraining YOLO-World on large-scale detection, grounding, and image-text datasets.  \\n\\nLearning from Region-Text Contrastive Loss. Given the mosaic sample $I$ and texts $T$ , YOLO-World outputs $K$ $\\\\Omega=\\\\{B_{i},t_{i}\\\\}_{i=1}^{N}$ label assignment [ {}tions . We follow [ 8 ] to match the predictions with ground$\\\\{B_{k},s_{k}\\\\}_{k=1}^{K}$ 19 ] and leverage task-aligned along with annotations truth annotations and assign each positive prediction with a text index as the classification label. Based on this vocabulary, we construct the region-text contrastive loss $\\\\mathcal{L}_{\\\\mathrm{con}}$ with region-text pairs through cross entropy between object-text (region-text) similarity and object-text assignments. In addition, we adopt IoU loss and distributed focal loss for bounding box regression and the total training loss is defined as: $\\\\mathcal{L}(I)\\\\,=\\\\,\\\\mathcal{L}_{\\\\mathrm{con}}\\\\,+\\\\,\\\\lambda_{I}\\\\,\\\\cdot\\\\,(\\\\mathcal{L}_{\\\\mathrm{iou}}\\\\,+\\\\,\\\\mathcal{L}_{\\\\mathrm{dfl}})$ ,wh re $\\\\lambda_{I}$ is an indicator factor and set to 1 when input image I is from detection or grounding data and set to 0 when it is from the image-text data. Considering image-text datasets have noisy boxes, we only calculate the regression loss for samples with accurate bounding boxes.  \\n\\nPseudo Labeling with Image-Text Data. 
Rather than directly using image-text pairs for pre-training, we propose an automatic labeling approach to generate region-text pairs. Specifically, the labeling approach contains three steps: (1) extract noun phrases : we first utilize the n-gram algorithm to extract noun phrases from the text; (2) pseudo labeling : we adopt a pre-trained open-vocabulary detector, e.g ., GLIP [ 23 ], to generate pseudo boxes for the given noun phrases for each image, thus providing the coarse region-text pairs. (3) filtering : We employ the pre-trained CLIP [ 36 ] to evaluate the relevance of image-text pairs and region-text pairs, and filter the low-relevance pseudo annotations and images. We further filter redundant bounding boxes by incorporating methods such as Non-Maximum Suppression (NMS). We suggest the readers refer to the appendix for the detailed approach. With the above approach, we sample and label 246k images from CC3M [ 44 ] with 821k pseudo annotations.\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024},{\"id\":454849335560957318,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":3,\"chunk_text\":\"# 4. Experiments\\nIn this section, we demonstrate the effectiveness of the proposed YOLO-World by pre-training it on large-scale datasets and evaluating YOLO-World in a zero-shot manner on both LVIS benchmark and COCO benchmark (Sec. 4.2 ). We also evaluate the fine-tuning performance of YOLOWorld on COCO, LVIS for object detection.\\n\\n# 4.1. Implementation Details\\nThe YOLO-World is developed based on the MMYOLO toolbox [ 3 ] and the MMDetection toolbox [ 2 ]. Following [19 ], we provide three variants of YOLO-World for different latency requirements, e.g ., small (S), medium (M), and large (L). We adopt the open-source CLIP [ 36 ] text encoder with pre-trained weights to encode the input text. 
Unless specified, we measure the inference speeds of all models on one NVIDIA V100 GPU without extra acceleration mechanisms, e.g ., FP16 or TensorRT.\\n\\n# 4.2. Pre-training\\nExperimental Setup. At the pre-training stage, we adopt the AdamW optimizer [ 33 ] with an initial learning rate of 0.002 and weight decay of 0.05. YOLO-World is pretrained for 100 epochs on on 32 NVIDIA V100 GPUs with a total batch size of 512. During pre-training, we follow previous works [ 19 ] and adopt color augmentation, random affine, random flip, and mosaic with 4 images for data augmentation. The text encoder is frozen during pre-training.  \\n\\nTable 1. Pre-training Data. The specifications of the datasets used for pre-training YOLO-World.   \\n\\n\\n<html><body><table><tr><td>Dataset</td><td>Type</td><td>Vocab.</td><td>Images</td><td>Anno.</td></tr><tr><td>Objects365V1 [43]</td><td>Detection</td><td>365</td><td>609k</td><td>9,621k</td></tr><tr><td>GQA [16]</td><td>Grounding</td><td></td><td>621k</td><td>3,681k</td></tr><tr><td>Flickr[35]</td><td>Grounding</td><td></td><td>149k</td><td>641k</td></tr><tr><td>CC3M↑[44]</td><td>Image-Text</td><td></td><td>246k</td><td>821k</td></tr></table></body></html>  \\n\\nPre-training Data. For pre-training YOLO-World, we mainly adopt detection or grounding datasets including Objects365 (V1) [ 43 ], GQA [ 16 ], Flickr30k [ 35 ], as specified in Tab. 1 .Following [ 23 ], we exclude the images from the COCO dataset in GoldG [ 20 ] (GQA and Flickr30k). The annotations of the detection datasets used for pretraining contain both bounding boxes and categories or noun phrases. In addition, we also extend the pre-training data with image-text pairs, i.e ., $\\\\mathrm{{CC}}3\\\\mathrm{{M}}^{\\\\dag}$ [44 ], which we have labeled 246k images through the pseudo-labeling method discussed in Sec. 3.4 .  \\n\\nZero-shot Evaluation. After pre-training, we directly evaluate the proposed YOLO-World on the LVIS dataset [ 13 ] in a zero-shot manner. 
The LVIS dataset contains 1203 object categories, which is much more than the categories of the pre-training detection datasets and can measure the performance on large vocabulary detection. Following previous works [ 20 ,23 ,52 ,53 ], we mainly evaluate on LVIS minival [20 ] and report the Fixed $A P$ [4 ] for comparison. The maximum number of predictions is set to 1000.  \\n\\nMain Results on LVIS Object Detection. In Tab. 2 , we compare the proposed YOLO-World with recent state-ofthe-art methods [ 20 ,29 ,52 ,53 ,55 ] on LVIS benchmark in a zero-shot manner. Considering the computation burden and model parameters, we mainly compare with those methods based on lighter backbones, e.g ., Swin-T [ 31 ]. Remarkably, YOLO-World outperforms previous state-of-the-art methods in terms of zero-shot performance and inference speed. Compared to GLIP, GLIPv2, and Grounding DINO, which incorporate more data, e.g ., Cap4M ( $\\\\mathrm{CC}3\\\\mathrm{M}{+}\\\\mathrm{SBU}$ [34 ]), YOLO-World pre-trained on O365 & GolG obtains better performance even with fewer model parameters. Compared to DetCLIP, YOLO-World achieves comparable performance $(35.4\\\\ \\\\nu.s.$ .34.4) while obtaining $20\\\\times$ increase in inference speed. The experimental results also demonstrate that small models, e.g ., YOLO-World-S with 13M parameters, can be used for vision-language pre-training and obtain strong open-vocabulary capabilities.\\n\\n# 4.3. Ablation Experiments\\nWe provide extensive ablation studies to analyze YOLOWorld from two primary aspects, i.e ., pre-training and architecture. Unless specified, we mainly conduct ablation experiments based on YOLO-World-L and pre-train Objects365 with zero-shot evaluation on LVIS minival .  \\n\\nPre-training Data. In Tab. 3 , we evaluate the performance of pre-training YOLO-World using different data. Compared to the baseline trained on Objects365, adding GQA can significantly improve performance with an 8.4 AP gain on LVIS. 
This improvement can be attributed to the richer textual information provided by the GQA dataset, which can enhance the model’s ability to recognize large vocabulary objects. Adding part of CC3M samples $8\\\\%$ of the full datasets) can further bring $0.5\\\\;\\\\mathrm{AP}$ gain with 1.3 AP on rare objects. Tab. 3 demonstrates that adding more data can effectively improve the detection capabilities on large-vocabulary scenarios. Furthermore, as the amount of data increases, the performance continues to improve, highlighting the benefits of leveraging larger and more diverse datasets for training.  \\n\\nAblations on RepVL-PAN. Tab. 4 demonstrates the effectiveness of the proposed RepVL-PAN of YOLO-World, including Text-guided CSPLayers and Image Pooling Attention, for the zero-shot LVIS detection. Specifically, we adopt two settings, i.e ., (1) pre-training on O365 and (2) pre-training on O365 & GQA. Compared to O365 which only contains category annotations, GQA includes rich texts, particularly in the form of noun phrases. As shown in Tab. 4 , the proposed RepVL-PAN improves the baseline (YOLOv8-PAN [ 19 ]) by 1.1 AP on LVIS, and the improvements are remarkable in terms of the rare categories $(\\\\mathrm{AP}_{r})$ ) of LVIS, which are hard to detect and recognize. In addition, the improvements become more significant when YOLO-World is pre-trained with the GQA dataset and experiments indicate that the proposed RepVL-PAN works better with rich textual information.  \\n\\nText Encoders. In Tab. 5 , we compare the performance of using different text encoders, i.e ., BERT-base [ 5 ] and CLIP-base (ViT-base) [ 36 ]. We exploit two settings during pre-training, i.e ., frozen and fine-tuned, and the learning rate for fine-tuning text encoders is a $0.01\\\\times$ factor of the basic learning rate. As Tab. 
5 shows, the CLIP text encoder obtains superior results than BERT $_{\\\\left.+10.1\\\\right.}$ AP for rare categories in LVIS), which is pre-trained with imagetext pairs and has better capability for vision-centric embeddings. Fine-tuning BERT during pre-training brings significant improvements $(+3.7$ AP) while fine-tuning CLIP leads to a severe performance drop. We attribute the drop to that fine-tuning on O365 may degrade the generalization ability of the pre-trained CLIP, which contains only 365 categories and lacks abundant textual information.  \\n\\n<html><body><table><tr><td>Method</td><td>Backbone</td><td>Params</td><td>Pre-trained Data</td><td>FPS</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>MDETR [20]</td><td>R-101 [14]</td><td>169M</td><td>GoldG</td><td></td><td>24.2</td><td>20.9</td><td>24.3</td><td>24.2</td></tr><tr><td>GLIP-T [23]</td><td>Swin-T [31]</td><td>232M</td><td>0365,GoldG</td><td>0.12</td><td>24.9</td><td>17.7</td><td>19.5</td><td>31.0</td></tr><tr><td>GLIP-T [23]</td><td>Swin-T [31]</td><td>232M</td><td>0365,GoldG,Cap4M</td><td>0.12</td><td>26.0</td><td>20.8</td><td>21.4</td><td>31.0</td></tr><tr><td>GLIPv2-T [55]</td><td>Swin-T [31]</td><td>232M</td><td>0365,GoldG</td><td>0.12</td><td>26.9</td><td></td><td></td><td></td></tr><tr><td>GLIPv2-T [55]</td><td>Swin-T [31]</td><td>232M</td><td>0365,GoldG,Cap4M</td><td>0.12</td><td>29.0</td><td>-</td><td></td><td></td></tr><tr><td>Grounding DINO-T [29]</td><td>Swin-T [31]</td><td>172M</td><td>0365,GoldG</td><td>1.5</td><td>25.6</td><td>14.4</td><td>19.6</td><td>32.2</td></tr><tr><td>Grounding DINO-T [29]</td><td>Swin-T [31]</td><td>172M</td><td>0365,GoldG,Cap4M</td><td>1.5</td><td>27.4</td><td>18.1</td><td>23.3</td><td>32.7</td></tr><tr><td>DetCLIP-T [52]</td><td>Swin-T [31]</td><td>155M</td><td>0365,GoldG</td><td>2.3</td><td>34.4</td><td>26.9</td><td>33.9</td><td>36.3</td></tr><tr><td>YOLO-World-S</td><td>YOLOv8-S</td><td>13M (77M)</td><td>0365,GoldG</td><td>74.1 
(19.9)</td><td>26.2</td><td>19.1</td><td>23.6</td><td>29.8</td></tr><tr><td>YOLO-World-M</td><td>YOLOv8-M</td><td>29M (92M)</td><td>0365,GoldG</td><td>58.1 (18.5)</td><td>31.0</td><td>23.8</td><td>29.2</td><td>33.9</td></tr><tr><td>YOLO-World-L</td><td>YOLOv8-L</td><td>48M (110M)</td><td>0365,GoldG</td><td>52.0 (17.6)</td><td>35.0</td><td>27.1</td><td>32.8</td><td>38.3</td></tr><tr><td>YOLO-World-L</td><td>YOLOv8-L</td><td>48M (110M)</td><td>0365,GoldG,CC3M</td><td>52.0 (17.6)</td><td>35.4</td><td>27.6</td><td>34.1</td><td>38.0</td></tr></table></body></html>\\n\\nTable 2. Zero-shot Evaluation on LVIS. We evaluate YOLO-World on LVIS minival [20 ] in a zero-shot manner. We report the Fixed $A P$ [4 ] for a fair comparison with recent methods. †denotes the pseudo-labeled CC3M in our setting, which contains 246k samples. The FPS is evaluated on one NVIDIA V100 GPU w/o TensorRT. The parameters and FPS of YOLO-World are evaluated for both the re-parameterized version (w/o bracket) and the original version (w/ bracket).  \\n\\n<html><body><table><tr><td>Pre-trained Data</td><td>AP</td><td>APr APc</td><td>APf</td></tr><tr><td>0365</td><td>23.5</td><td>16.2 21.1</td><td>27.0</td></tr><tr><td>0365,GQA</td><td>31.9</td><td>22.5</td><td>29.9 35.4</td></tr><tr><td>0365,GoldG</td><td>32.5</td><td>22.3</td><td>30.6 36.0</td></tr><tr><td>0365,GoldG,CC3M</td><td>33.0</td><td>23.6</td><td>32.0 35.5</td></tr></table></body></html>  \\n\\nTable 3. Ablations on Pre-training Data. We evaluate the zeroshot performance on LVIS of pre-training YOLO-World with different amounts of data.   
\\n\\n\\n<html><body><table><tr><td>GQA</td><td>T-→I</td><td>I-→T</td><td>AP</td><td>APr</td><td>APc APf</td></tr><tr><td>x</td><td>x</td><td></td><td>22.4</td><td>14.5</td><td>20.1 26.0</td></tr><tr><td>x</td><td>人</td><td>x</td><td>23.2</td><td>15.2</td><td>20.6 27.0</td></tr><tr><td></td><td>人</td><td>人</td><td>23.5</td><td>16.2</td><td>21.1 27.0</td></tr><tr><td>人</td><td>x</td><td>x</td><td>29.7</td><td>21.0</td><td>27.1 33.6</td></tr><tr><td>人</td><td>人</td><td>人</td><td>31.9</td><td>22.5</td><td>29.9 35.4</td></tr></table></body></html>\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024},{\"id\":454849335631998344,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":4,\"chunk_text\":\"# 4.4. Fine-tuning YOLO-World\\nIn this section, we further fine-tune YOLO-World for closeset object detection on the COCO dataset and LVIS dataset to demonstrate the effectiveness of the pre-training.  \\n\\nTable 5. Text Encoder in YOLO-World. We ablate different text encoders in YOLO-World through the zero-shot LVIS evaluation.   \\n\\n\\n<html><body><table><tr><td>TextEncoder</td><td>Frozen?</td><td>AP</td><td>APr APc</td><td>APf</td></tr><tr><td>BERT-base</td><td>Frozen</td><td>14.6</td><td>3.4 10.7</td><td>20.0</td></tr><tr><td>BERT-base</td><td>Fine-tune</td><td>18.3</td><td>6.6 14.6</td><td>23.6</td></tr><tr><td>CLIP-base</td><td>Frozen</td><td>22.4</td><td>14.5 20.1</td><td>26.0</td></tr><tr><td>CLIP-base</td><td>Fine-tune</td><td>19.3</td><td>8.6 15.7</td><td>24.8</td></tr></table></body></html>  \\n\\nExperimental Setup. We use the pre-trained weights to initialize YOLO-World for fine-tuning. All models are finetuned for 80 epochs with the AdamW optimizer and the initial learning rate is set to 0.0002. In addition, we fine-tune the CLIP text encoder with a learning factor of 0.01. 
For the LVIS dataset, we follow previous works [ 7 ,12 ,59 ] and finetune YOLO-World on the LVIS-base (common & frequent) and evaluate it on the LVIS-novel (rare).  \\n\\nCOCO Object Detection. We compare the pre-trained YOLO-World with previous YOLO detectors [ 19 ,22 ,48 ]in Tab. 6 .For fine-tuning YOLO-World on the COCO dataset, we remove the proposed RepVL-PAN for further acceleration considering that the vocabulary size of the COCO dataset is small. In Tab. 6 , it’s evident that our approach can achieve decent zero-shot performance on the COCO dataset, which indicates that YOLO-World has strong generalization ability. Moreover, YOLO-World after fine-tuning on the COCO train2017 demonstrates higher performance compared to previous methods trained from scratch.  \\n\\nTable 6. Comparison with YOLOs on COCO Object Detection. We fine-tune the YOLO-World on COCO train2017 and evaluate on COCO val2017 . The results of YOLOv7 [ 48 ] and YOLOv8 [ 19 ] are obtained from MMYOLO [ 3 ]. ‘O’, $\\\\mathbf{\\\\omega}^{\\\\ast}\\\\mathbf{G}^{\\\\ast}$ , and ‘C’ denote pertaining using Objects365, GoldG, and $\\\\mathrm{{CC3M}^{\\\\dag}}$ , respectively. The FPS is measured on one NVIDIA V100 w/ TensorRT.   
\\n\\n\\n<html><body><table><tr><td>Method</td><td>Pre-train</td><td>AP</td><td>AP50</td><td>AP75</td><td>FPS</td></tr><tr><td colspan=\\\"6\\\">Trainingfromscratch.</td></tr><tr><td>YOLOv6-S [22]</td><td></td><td>43.7</td><td>60.8</td><td>47.0</td><td>442</td></tr><tr><td>YOLOv6-M [22]</td><td>x</td><td>48.4</td><td>65.7</td><td>52.7</td><td>277</td></tr><tr><td>YOLOv6-L [22]</td><td>x</td><td>50.7</td><td>68.1</td><td>54.8</td><td>166</td></tr><tr><td>YOLOv7-T [48]</td><td>x</td><td>37.5</td><td>55.8</td><td>40.2</td><td>404</td></tr><tr><td>YOLOv7-L [48]</td><td>x</td><td>50.9</td><td>69.3</td><td>55.3</td><td>182</td></tr><tr><td>YOLOv7-X[48]</td><td></td><td>52.6</td><td>70.6</td><td>57.3</td><td>131</td></tr><tr><td>YOLOv8-S [19]</td><td>x</td><td>44.4</td><td>61.2</td><td>48.1</td><td>386</td></tr><tr><td>YOLOv8-M [19]</td><td>x</td><td>50.5</td><td>67.3</td><td>55.0</td><td>238</td></tr><tr><td>YOLOv8-L [19]</td><td></td><td>52.9</td><td>69.9</td><td>57.7</td><td>159</td></tr><tr><td colspan=\\\"6\\\">Zero-shot transfer.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>37.6</td><td>52.3</td><td>40.7</td><td></td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>42.8</td><td>58.3</td><td>46.4</td><td></td></tr><tr><td>YOLO-World-L</td><td>O+G</td><td>44.4</td><td>59.8</td><td>48.3</td><td></td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>45.1</td><td>60.7</td><td>48.9</td><td></td></tr><tr><td colspan=\\\"6\\\">Fine-tunedw/RepVL-PAN.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>45.9</td><td>62.3</td><td>50.1</td><td></td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>51.2</td><td>68.1</td><td>55.9</td><td></td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>53.3</td><td>70.1</td><td>58.2</td><td></td></tr><tr><td 
colspan=\\\"6\\\">Fine-tunedw/oRepVL-PAN.</td></tr><tr><td>YOLO-World-S</td><td>O+G</td><td>45.7</td><td>62.3</td><td>49.9</td><td>373</td></tr><tr><td>YOLO-World-M</td><td>O+G</td><td>50.7</td><td>67.2</td><td>55.1</td><td>231</td></tr><tr><td>YOLO-World-L</td><td>O+G+C</td><td>53.3</td><td>70.3</td><td>58.1</td><td>156</td></tr></table></body></html>  \\n\\nLVIS Object Detection. In Tab. 7 , we evaluate the finetuning performance of YOLO-World on the standard LVIS dataset. Firstly, compared to the oracle YOLOv8s [ 19 ]trained on the full LVIS datasets, YOLO-World achieves significant improvements, especially for larger models, e.g ., YOLO-World-L outperforms YOLOv8-L by $7.2\\\\,\\\\textrm{A P}$ and $10.2~\\\\mathrm{AP}_{r}$ . The improvements can demonstrate the effectiveness of the proposed pre-training strategy for largevocabulary detection. Moreover, YOLO-World, as an efficient one-stage detector, outperforms previous state-of-theart two-stage methods [ 7 ,12 ,21 ,49 ,59 ] on the overall performance without extra designs, e.g ., learnable prompts [ 7 ]or region-based alginments [ 12 ].  
\\n\\n<html><body><table><tr><td>Method</td><td>AP</td><td>APr</td><td>APc</td><td>APf</td></tr><tr><td>ViLD [12]</td><td>27.8</td><td>16.7</td><td>26.5</td><td>34.2</td></tr><tr><td>RegionCLIP [58]</td><td>28.2</td><td>17.1</td><td></td><td></td></tr><tr><td>Detic [59]</td><td>26.8</td><td>17.8</td><td></td><td></td></tr><tr><td>FVLM [21]</td><td>24.2</td><td>18.6</td><td>-</td><td></td></tr><tr><td>DetPro [7]</td><td>28.4</td><td>20.8</td><td>27.8</td><td>32.4</td></tr><tr><td>BARON [49]</td><td>29.5</td><td>23.2</td><td>29.3</td><td>32.5</td></tr><tr><td>YOLOv8-S</td><td>19.4</td><td>7.4</td><td>17.4</td><td>27.0</td></tr><tr><td>YOLOv8-M</td><td>23.1</td><td>8.4</td><td>21.3</td><td>31.5</td></tr><tr><td>YOLOv8-L</td><td>26.9</td><td>10.2</td><td>25.4</td><td>35.8</td></tr><tr><td>YOLO-World-S</td><td>23.9</td><td>12.8</td><td>20.4</td><td>32.7</td></tr><tr><td>YOLO-World-M</td><td>28.8</td><td>15.9</td><td>24.6</td><td>39.0</td></tr><tr><td>YOLO-World-L</td><td>34.1</td><td>20.4</td><td>31.1</td><td>43.5</td></tr></table></body></html>\\n\\n# 4.5. Open-Vocabulary Instance Segmentation\\nIn this section, we further fine-tune YOLO-World for segmenting objects under the open-vocabulary setting, which can be termed open-vocabulary instance segmentation (OVIS). Previous methods [ 17 ] have explored OVIS with pseudo-labelling on novel objects. Differently, considering that YOLO-World has strong transfer and generalization capabilities, we directly fine-tune YOLO-World on a subset of data with mask annotations and evaluate the segmentation performance under large-vocabulary settings. 
Specifically, we benchmark open-vocabulary instance segmentation under two settings:  \\n\\n• (1) COCO to LVIS setting, we fine-tune YOLO-World on the COCO dataset (including 80 categories) with mask annotations, under which the models need to transfer from 80 categories to 1203 categories $\\\\langle80\\\\rightarrow1203\\\\rangle$ );   \\n• (2) LVIS-base to LVIS setting, we fine-tune YOLO-World on the LVIS-base (including 866 categories, common & frequent) with mask annotations, under which the models need to transfer from 866 categories to 1203 categories $\\\\langle866\\\\rightarrow1203\\\\rangle$ ).   \\nWe evaluate the fine-tuned models on the standard LVIS   \\nval2017 with 1203 categories, in which 337 rare cate  \\ngories are unseen and can be used to measure the open  \\nvocabulary performance.  \\n\\nResults. Tab. 8 shows the experimental results of extending YOLO-World for open-vocabulary instance segmentation. Specifically, we adopt two fine-tuning strategies: (1) only fine-tuning the segmentation head and (2) fine-tuning all modules. Under strategy (1), the fine-tuned YOLOWorld still retains the zero-shot capabilities acquired from the pre-training stage, allowing it to generalize to unseen categories without additional fine-tuning. Strategy (2) enables YOLO-World fit the LVIS dataset better, but it may result in the degradation of the zero-shot capabilities.  \\n\\nTab. 8 shows the comparisons of fine-tuning YOLOWorld with different settings (COCO or LVIS-base) and different strategies (fine-tuning seg. head or fine-tuning all). Firstly, fine-tuning on LVIS-base obtains better performance compared to that based on COCO. However, the ratios between AP and $\\\\mathrm{AP}_{r}$ $\\\\langle\\\\boldsymbol{\\\\mathrm{AP}}_{r}/\\\\boldsymbol{\\\\mathrm{AP}}\\\\rangle$ are nearly unchanged, e.g ., the ratios of YOLO-World on COCO and LVIS-base are $76.5\\\\%$ and $74.3\\\\%$ , respectively. 
Considering that the detector is frozen, we attribute the performance gap to the fact that the LVIS dataset provides more detailed and denser segmentation annotations, which are beneficial for learning the segmentation head. When fine-tuning all modules, YOLO-World obtains remarkable improvements on LVIS, e.g ., YOLO-World-L achieves 9.6 AP gain. However, the fine-tuning might degrade the open-vocabulary performance and lead to a 0.6 box $\\\\mathrm{AP}_{r}$ drop for YOLO-World-L.\\n\\n# 4.6. Visualizations\\nWe provide the visualization results of pre-trained YOLOWorld-L under three settings: (a) we perform zero-shot inference with LVIS categories; (b) we input the custom prompts with fine-grained categories with attributes; (c) referring detection. The visualizations also demonstrate that YOLO-World has a strong generalization ability for openvocabulary scenarios along with referring ability.  \\n\\nZero-shot Inference on LVIS. Fig. 5 shows the visualization results based on the LVIS categories which are generated by the pre-trained YOLO-World-L in a zero-shot manner. The pre-trained YOLO-World exhibits strong zeroshot transfer capabilities and is able to detect as many objects as possible within the image.  \\n\\nInference with User’s Vocabulary. In Fig. 6 , we explore the detection capabilities of YOLO-World with our defined categories. The visualization results demonstrate that the pre-trained YOLO-World-L also exhibits the capability for (1) fine-grained detection ( i.e ., detect the parts of one object) and (2) fine-grained classification ( i.e ., distinguish different sub-categories of objects.).  \\n\\nReferring Object Detection. In Fig. 7 , we leverage some descriptive (discriminative) noun phrases as input, e.g ., the standing person, to explore whether the model can locate regions or objects in the image that match our given input. 
The visualization results display the phrases and their corresponding bounding boxes, demonstrating that the pretrained YOLO-World has the referring or grounding capability. This ability can be attributed to the proposed pretraining strategy with large-scale training data.\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024},{\"id\":454849335694388618,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":5,\"chunk_text\":\"# 5. Conclusion\\nWe present YOLO-World, a cutting-edge real-time openvocabulary detector aiming to improve efficiency and openvocabulary capability in real-world applications. In this paper, we have reshaped the prevalent YOLOs as a visionlanguage YOLO architecture for open-vocabulary pretraining and detection and proposed RepVL-PAN, which connects vision and language information with the network and can be re-parameterized for efficient deployment. We further present the effective pre-training schemes with detection, grounding and image-text data to endow YOLOWorld with a strong capability for open-vocabulary detection. Experiments can demonstrate the superiority of YOLO-World in terms of speed and open-vocabulary performance and indicate the effectiveness of vision-language pre-training on small models, which is insightful for future research. We hope YOLO-World can serve as a new benchmark for addressing real-world open-vocabulary detection.\\n\\n\\n\\n# A. Additional Details\\n\\n# A.1. Re-parameterization for RepVL-PAN\\nDuring inference on an offline vocabulary, we adopt reparameterization for RepVL-PAN for faster inference speed and deployment. Firstly, we pre-compute the text embeddings $\\\\bar{W}\\\\in\\\\mathbb{R}^{C\\\\times D}$ through the text encoder.  \\n\\nRe-parameterize T-CSPLayer. 
For each T-CSPLayer in RepVL-PAN, we can re-parameterize and simplify the process of adding text guidance by reshaping the text embeddings $W\\\\in\\\\bar{\\\\mathbb{R}^{C\\\\times D\\\\times1\\\\times1}}$ into the weights of a $1\\\\times1$ convolution layer (or a linear layer), as follows:  \\n\\n$X^{\\\\prime}=X\\\\odot\\\\mathrm{si}\\\\,\\\\mathrm{gmoi}\\\\,\\\\mathrm{d}(\\\\mathfrak{m a x}(\\\\mathsf{C o n v}(X,W),\\\\mathsf{d i m}\\\\!=\\\\!1)),$ ,(4) where $X\\\\times\\\\ \\\\in\\\\ \\\\mathbb{R}^{B\\\\times D\\\\times H\\\\times W}$ and $X^{\\\\prime}\\\\ \\\\in\\\\ \\\\mathbb{R}^{B\\\\times D\\\\times H\\\\times W}$ are the input and output image features. plication with reshape or transpose ⊙is the matrix multi.  \\n\\nRe-parameterize I-Pooling Attention. The I-Pooling Attention can be re-parameterize or simplified by:  \\n\\n$$\\n\\\\tilde{X}=\\\\mathsf{c a t}\\\\big(\\\\mathsf{M P}\\\\big(X_{3},3\\\\big),\\\\mathsf{M P}\\\\big(X_{4},3\\\\big),\\\\mathsf{M P}\\\\big(X_{5},3\\\\big)\\\\big),\\n$$  \\n\\nwhere cat is th centration and $\\\\mathrm{{MP}}(\\\\cdot,\\\\,3)$ denotes the the multi-scale features in RepVL-PAN. max pooling for $3\\\\times3$ ×output features. $\\\\dot{X}$ $\\\\{X_{3},X_{4},X_{5}\\\\}$ is flattened and are has the shape of $B\\\\times D\\\\times27$ . Then we can update the text embeddings by:  \\n\\n$$\\nW^{\\\\prime}=W+{\\\\mathrm{Softmax}}(W\\\\odot\\\\tilde{X}),{\\\\mathrm{dim}}{=}{-}1)\\\\odot W,\\n$$\\n\\n# A.2. Fine-tuning Details.\\nWe remove all T-CSPLayers and Image-Pooling Attention in RepVL-PAN when transferring YOLO-World to COCO [ 25 ] object detection, which only contains 80 categories and has a relatively low dependency on visuallanguage interaction. During fine-tuning, we initialize YOLO-World using pre-trained weights. The learning rate of fine-tuning is set to 0.0002 with the weight decay set to 0.05. 
After fine-tuning, we pre-compute the class text embeddings with given COCO categories and store the embeddings into the weights of the classification layers.\\n\\n# B. Automatic Labeling on Large-scale ImageText Data\\nIn this section, we add details procedures for labeling region-text pairs with large-scale image-text data, e.g ., CC3M [ 44 ]. The overall labeling pipeline is illustrated in Fig. 8 , which mainly consists of three procedures, i.e ., (1) extract object nouns, (2) pseudo labeling, and (3) filtering. As discussed in Sec. 3.4 , we adopt the simple n-gram algorithm to extract nouns from captions.  \\n\\nRegion-Text Proposals. After obtaining the set of object nouns $T\\\\,=\\\\,\\\\{t_{k}\\\\}^{K}$ from the first step, we leverage a pretrained open-vocabulary detector, i.e ., GLIP-L [ 23 ], to generate pseudo boxes $\\\\{B_{i}\\\\}$ along with confidence scores $\\\\{c_{i}\\\\}$ :  \\n\\n$$\\n\\\\{B_{i},t_{i},c_{i}\\\\}_{i=1}^{N}=\\\\mathsf{G L I P-L a b e1e r}(I,T),\\n$$  \\n\\nwhere $\\\\{B_{i},t_{i},c_{i}\\\\}_{i=1}^{N}$ are the coarse region-text proposals.  \\n\\nCLIP-based Re-scoring $\\\\&$ Filtering. Considering the region-text proposals containing much noise, we present a restoring and filtering pipeline with the pre-trained CLIP [ 36 ]. Given the input image $I$ , caption $T$ , and pipeline is listed as follows: the coarse region-text proposals $\\\\{B_{i},t_{i},c_{i}\\\\}_{i=1}^{N}$ , the specific  \\n\\n• (1) Compute Image-Text Score: we forward the image $I$ with its caption $T$ into CLIP and obtain the image-text similarity score $s^{i m g}$ .  \\n• (2) Compute Region-Text Score: we crop the region images from the input image according to the region boxes $\\\\{B_{i}\\\\}$ . Then we forward the cropped images along with their texts larity $\\\\boldsymbol{S^{r}}=\\\\{s_{i}^{r}\\\\}_{i=1}^{N}$ $\\\\{t_{i}\\\\}$ into CLIP and obtain the region-text simi}.  
\n• (3) [Optional] Re-Labeling: we can forward each cropped image with all nouns and assign the noun with maximum similarity, which can help correct the texts wrongly labeled by GLIP.   \n• (4) Rescoring: we adopt the region-text similarity $S^{r}$ to rescore the confidence scores $\\tilde{c_{i}}=\\sqrt{c_{i}*s_{i}^{r}}$ .  \n• (5) Region-level Filtering: we first divide the region-text proposals into different groups according to the texts and then perform non-maximum suppression (NMS) to filter the duplicate predictions (the NMS threshold is set to 0.5). Then we filter out the proposals with low confidence scores (the threshold is set to 0.3).   \n• (6) Image-level Filtering: we compute the image-level region-text scores $s^{r e g i o n}$ by averaging the kept region-text scores. Then we obtain the image-level confidence score by $s\\;=\\;\\sqrt{s^{i m g}*s^{r e g i o n}}$ and we keep the images with scores larger than 0.3.  \n\nThe thresholds mentioned above are empirically set according to the part of labeled results and the whole pipeline is automatic without human verification. Finally, the labeled samples are used for pre-training YOLO-World. We will provide the pseudo annotations of CC3M for further research.  \n\n  \nFigure 8. Labeling Pipeline for Image-Text Data. We first leverage the simple n-gram to extract object nouns from the captions. We adopt a pre-trained open-vocabulary detector to generate pseudo boxes given the object nouns, which forms the coarse region-text proposals. Then we use a pre-trained CLIP to rescore or relabel the boxes along with filtering.\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024},{\"id\":454849335751536012,\"paper_id\":\"65b9ac94939a5f4082225348\",\"paper_title\":\"YOLO-World: Real-Time Open-Vocabulary Object Detection\",\"chunk_id\":6,\"chunk_text\":\"# C. 
Pre-training YOLO-World at Scale\\nWhen pre-training small models, e.g ., YOLO-World-S, a natural question we have is: how much capacity does a small model have, and how much training data or what kind of data does a small model need? To answer this question, we leverage different amounts of pseudo-labeled region-text pairs to pre-train YOLO-World. As shown in Tab. 9 , adding more image-text samples can increase the zero-shot performance of YOLO-World-S. Tab. 9 indicates: (1) adding image-text data can improve the overall zero-shot performance of YOLO-World-S; (2) using an excessive amount of pseudo-labeled data may have some negative effects for small models (YOLO-World-S), though it can improve the on rare categories $(\\\\mathbf{A}\\\\mathbf{P}_{r})$ ). However, using fine-grained annotations (GoldG) for small models can provide significant improvements, which indicates that large-scale high-quality annotated data can significantly enhance the capabilities of small models. And Tab. 3 in the main text has shown that pre-training with the combination of fine-annotated data and pseudo-annotated data can perform better. We will explore more about the data for pre-training small models or YOLO detectors in future work.  \\n\\n<html><body><table><tr><td>Method</td><td>Pre-trainedData</td><td>Samples</td><td>AP</td><td>APr</td><td>APc</td><td>AP f</td></tr><tr><td>YOLO-World-S</td><td>0365</td><td>0.61M</td><td>16.3</td><td>9.2</td><td>14.1</td><td>20.1</td></tr><tr><td>YOLO-World-S</td><td>0365+GoldG</td><td>1.38M</td><td>24.2</td><td>16.4</td><td>21.7</td><td>27.8</td></tr><tr><td>YOLO-World-S</td><td>O365+CC3M-245k</td><td>0.85M</td><td>16.5</td><td>10.8</td><td>14.8</td><td>19.1</td></tr><tr><td>YOLO-World-S</td><td>O365+CC3M-520k</td><td>1.13M</td><td>19.2</td><td>10.7</td><td>17.4</td><td>22.4</td></tr><tr><td>YOLO-World-S</td><td>O365+CC3M-750k</td><td>1.36M</td><td>18.2</td><td>11.2</td><td>16.0</td><td>21.1</td></tr></table></body></html>\\n\\nTable 9. 
Zero-shot Evaluation on LVIS. We evaluate the performance of pre-training YOLO-World-S with different amounts of data, the image-text data.\",\"original_filename\":\"Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db\",\"year\":2024}],[{\"id\":454846581288815130,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":0,\"chunk_text\":\"# Integrating the Wikidata Taxonomy into YAGO\\nFabian Suchanek, Mehwish Alam, Thomas Bonald, Pierre-Henri Paris, Jules Soria  \\n\\nTelecom Paris, Institut Polytechnique de Paris, France {first.last}@telecom-paris.fr , Abstract. Wikidata is one of the largest public general-purpose Knowledge Bases (KBs). Yet, due to its collaborative nature, its schema and taxonomy have become convoluted. For the YAGO 4 KB, we combined Wikidata with the ontology from Schema.org, which reduced and cleaned up the taxonomy and constraints and made it possible to run automated reasoners on the data. However, it also cut away large parts of the Wikidata taxonomy. In this paper, we present our effort to merge the entire Wikidata taxonomy into the YAGO KB as much as possible. We pay particular attention to logical constraints and a careful distinction of classes and instances. Our work creates YAGO 4.5, which adds a rich layer of informative classes to YAGO, while at the same time keeping the KB logically consistent.  \\n\\nKeywords: YAGO · Knowledge Base · Wikidata · Taxonomy\\n\\n# 1 Introduction\\nThe rise of Large Language Models (LLMs) such as GPT, LLaMA [48], and PaLM [9] has deeply impacted all areas of natural language processing. And yet, these models have a crucial weakness: just like humans, they are not good at rote learning [52,36,45,8,46]. They cannot memorize large amounts of facts without forgetting some of them or inventing others. This is to be expected, as they are machine learning models: they are not made to memorize, but to generalize. 
For this, they interpolate between the data points. At the same time, they will always cover up their lack of accuracy very eloquently: they know how to talk even when they don’t know what to say.  \\n\\nThere are, however, some applications where exact data is indispensable, e.g., when computing the distance between two cities [52], when asking for a list of Nobel Prize winners [36], or when searching for an entity with a specific property [49]. This is why Knowledge Bases (KBs) have their raison d’être :they provide crisp symbolic knowledge. Indeed, some of the actors that build the world’s largest LLMs (Google, Meta) also build some of the world’s largest KBs [29]. There are also attempts to combine KBs with LLMs [31,18], which only further confirms the necessity of KBs.  \\n\\nWikidata. One of the largest general-purpose KBs nowadays is Wikidata [50].  \\n\\nIt provides a wealth of facts about nearly every domain of common human discourse, with more than 100 million entities 1 and around 1.4 billion facts about them. Each entity has an abstract identifier (such as Q303 ), which makes the identifiers language-independent and persistent in time. Tens of thousands of people contribute to the project. 
At the same time, being a collaborative KB, Wikidata suffers from a lack of agreement on the schema level: there are several classes that are difficult to distinguish for the uninitiated user (e.g., geographical location (Q2221906), location (Q115095765), geographic region (Q82794), physical location (Q17334923), and geographical area (Q3622002)); there are more than ten thousand relations; constraints are defined but not enforced ( Grotesco (Q10509019) is a subclass of Q49094906, which is not a class); classes and instances are mixed ( scientist (Q901), e.g., is both a subclass of person (Q215627) and an instance of profession (Q28640)); there are more than 2.7M classes of which only 3% are instantiated (1M subclasses of chemical entity (Q43460564) have no instance); and the taxonomy contains cycles (there are 47 pairs of classes that are subclasses of each other, e.g., method (Q1799072) and technique (Q2695280), and 15 cycles of length 3 or more, e.g., axiom (Q17736), first principle (Q536351), principle (Q211364)). Finally, the abstract identifiers for Wikidata properties and entities makes downstream applications more difficult.  \\n\\nYAGO 4. The YAGO KB has been in existence since 2008 [44,14,5,20]. Its fourth version [47] was designed to address the shortcomings of Wikidata: It combines the data about instances from Wikidata with the taxonomy and properties from Schema.org – an ontology developed by a W3C Community Group 2 . Filtering and constraint enforcement made YAGO 4 a KB that allows for automated reasoning. However, this merger came at the expense of abandoning nearly the entire class taxonomy of Wikidata. That is a pity because classes can express facts that are very hard to model correctly by RDF properties [33] like something is a “train ferry route”, a “financial regulatory agency”, or a “de facto consulate”. As a consequence, one of the major criticisms that users advanced was that the class hierarchy of YAGO 4 was too sparse.  
\\n\\nContributions. In this paper, we show how this shortcoming of YAGO 4 can be resolved, while still maintaining the logical consistency and semantic coherence of YAGO. We carefully incorporate selected parts of the Wikidata taxonomy into the taxonomy of Schema.org. This leads to considerable challenges. Numerous organically grown branches of the Wikidata taxonomy have to be disentangled. Furthermore, many classes in Wikidata are both instances and classes (a Doctor of Philosophy (Q752297), e.g., is both an instance of a postgraduate degree (Q23015928) and a subclass of doctorate (Q849697)). The transformation also posed engineering challenges: Wikidata comprises more than 120 GB even compressed, which have to be parsed and processed. We will describe how we surmounted these challenges, and what open problems still remain. The resulting resource, which we call YAGO 4.5, contains 109M facts and is logically consistent.  \\n\\nThis paper is structured as follows: Section 2 recalls related work, Section 3 discusses design decisions, Section 4 discusses implementation issues, and Section 5 presents the resulting KB, before Section 6 concludes.\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024},{\"id\":454846581321583132,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":1,\"chunk_text\":\"# 2 Related Work\\nGeneral-Purpose Knowledge Bases. The Semantic Web comprises hundreds of $\\\\mathrm{KBs^{3}}$ . Many of these are tailored for specific domains or applications, such as the Gene Ontology [3] for biological processes, functions, and cellular components. However, in this paper, we are concerned with general-purpose KBs that do not focus on a given domain. There are several prominent general-purpose KBs. ConceptNet [43] is a semantic network that primarily deals with common sense knowledge. 
This makes it an orthogonal project to YAGO concerned with facts about instances. BabelNet [28] is a large multilingual encyclopedic dictionary derived from WordNet, Wikipedia, Wikidata, and several other sources. BabelNet focuses on relations between concepts and words, and has neither a taxonomy nor a schema. DBpedia [4] is a large-scale, multilingual KB derived from Wikipedia (and, more recently, Wikidata). Unlike YAGO, it lacks information about the temporal validity of the facts. Moreover, the automated generation from the Wikipedia infoboxes and the priority for recall over precision has led DBpedia to be not fully consistent [40,1,10]. The manually curated part contains just 4M instances $_4$ Freebase [6] was an extensive KB consisting of metadata compiled from various sources. The project was discontinued in 2015, and its content was transferred [34] to Wikidata. Wikidata [50], a project of the Wikimedia Foundation, is a collaboratively edited KB that supports other Wikimedia projects like Wikipedia and Wikimedia Commons. It is by far the largest open KB. However, due to its collaborative nature, its taxonomy has grown convoluted and complicated, with inconsistencies in hierarchies and data models as well as rule violations – making it hard to use even by contributors [7,35,41].  \\n\\nIn this landscape, YAGO positions itself as a large general KB for facts about instances, with a taxonomy, manually defined properties, and logical constraints. Its key property is that it is a centrally controlled data source, which allows it to establish certain guarantees for the quality of its data [44,14,5,20,47]. The latest version, YAGO 4 [47], was designed to be clean enough to perform automated reasoning on it. However, its taxonomy is very parsimonious, which is the challenge that we address in the present paper.  \\n\\nUpper Ontologies. 
Top ontologies, also known as upper or foundational ontologies, provide a domain-independent framework for organizing knowledge across various fields. We discuss some of the most prominent projects below and refer the reader to Mascardi et al. [23] for a comprehensive comparison. Cyc [17] is one of the oldest and most comprehensive upper ontologies, developed by Cycorp, aiming to represent general human knowledge and common sense reasoning. SUMO [25] is an open-source upper ontology with a formal structure for organizing and integrating domain-specific ontologies. It consists of a core set of general concepts and relations and domain-specific extensions that cover various fields, such as biology, finance, and geography. DOLCE [24] is another top ontology which focuses on capturing the ontological categories underlying natural language and human cognition. BFO [2] is an upper ontology that was created based on the ontologies related to the domain of geospatial information. WordNet [27] contains lexical and semantic relationships between sets of synonymous words (synsets). WordNet does not define properties, and the project was discontinued in 2012.  \\n\\nSchema.org [12] differs from the above in that it is a collaborative project initiated by major companies like Google, Microsoft, and Yahoo to provide a shared vocabulary for annotating Web content with structured data. While not a top ontology in the traditional sense, Schema.org plays a crucial role in the Semantic Web ecosystem by promoting standardized vocabularies for describing entities and their properties, thus facilitating data interoperability and integration. It is broadly adopted (with more than 12 million websites in 2016) and benefits from strong industry support, making it a highly reliable and sustainable choice for building a KB [26].  \\n\\nTaxonomy Induction & Expansion. 
Many recent studies automate taxonomy induction and expansion, including Online Catalog Taxonomy EnrichmenT (OCTET) [22], TaxoCom [16], TaxoOrder[42], TaxoExpan [38], HiExpan [39], TaxoEnrich [15], and taxonomy induction from a set of terms [21]. Our own previous YAGO versions automatically mapped WordNet synsets to Wikipedia categories [44]. In contrast to these automated approaches, YAGO 4 and the new YAGO 4.5 use a manual mapping. This is because there are only a few dozen classes to be mapped.  \\n\\nThere is a community effort to map Wikidata properties and classes to schema.org 5 . These mappings use exact match (P2888) (64 mappings), equivalent class (P1709) (332 mappings), Equivalent property (P1628) (84 mappings), external subproperty (P2236) (22 mappings), and external superproperty (P2235) (6 mappings). However, the effort was discontinued in 2017. These mappings inspired the mappings of YAGO 4, which in turn were the basis for the mappings that we use in this paper.\\n\\n# 3 Designing YAGO\\n\\n# 3.1 Design Rationale\\nOur goal is to have a clean upper taxonomy for YAGO, which is precise and non-redundant so as to allow for automated reasoning. Our choice falls on Schema.org, for reasons we have elaborated in Section 2: the taxonomy is concise, maintained by a W3C consortium, and it finds applications well beyond its original purpose of annotating Web pages. It has the right level of detail for our purposes and does not digress into philosophical concepts. It defines not just classes, but also properties.  
\\n\\nOne could argue that the upper-level taxonomy is sufficient and that one should not aim to add more fine-grained classes – least of all the Wikidata taxonomy: it contains overly specific classes with few instances, some of its classes are not useful for large KB applications (such as multi-organism process (Q22269697) for elections), and sibling classes could be further grouped into common superclasses (e.g., Wikidata is missing a class that regroups human-made places, making do instead with human-made geographic feature (Q811430), human-made geographic feature (Q811463), and human-made geographic object (Q35145743)). All of this, one could argue, makes it an ill-suited candidate for a taxonomy.  \\n\\nHowever, these issues go away when a clean upper-level taxonomy is put on top: less useful lower-level classes (such as Q22269697) disappear because they are not subclasses of the clean upper-level classes. The missing grouping, likewise, can be achieved by the upper-level taxonomy – for example for places. Finally, even if a Wikidata class contains few instances, it can still carry meaningful information. For example, it is informative for the human user to know that an entity is a “General aviation monoplane with 1 tractor-piston-propeller engine” (Q33110974). It was precisely that lack of such classes that users deplored for YAGO 4. Such classes do not carry logically formalized meanings. (It would be cumbersome to formally express that something is a “General aviation monoplane with 1 tractor-piston-propeller engine” by RDF statements [32,33].) Rather, the purpose of these lower-level classes is to convey informal information for the human user. The Wikidata classes clearly serve this purpose.  \\n\\nThus, we now face the challenge of integrating the upper taxonomy from Schema.org with the lower taxonomy from Wikidata. The following design principles drive our integration:  \\n\\n1. Prefer properties over class membership. 
Some information can be expressed either by a property ( hasGender female ) or by a class membership (type Woman ). The option of class membership is chosen only if the class appears in the domain or range of a property. In our example, Woman does not appear as a range or domain of any property (its superclass Human does). Hence, the class Woman should not exist, and the gender should be expressed by a property. This is consistent with Wikidata’s way of modeling. When we have the choice between a property and its inverse property (e.g., hasCitizenship and hasCitizen ), we choose the one that has, on average, less objects per subject (i.e., hasCitizenship ). This, too, is consistent with major KBs [11].  \\n\\n2. The upper taxonomy exists to define formal properties that will be populated. All classes of the upper taxonomy shall define formal properties (e.g., an Airline as an iataCode , which justifies its existence as an upper-level class). Both the domain and the range of these properties have to be upperlevel classes.  \\n\\n3. The lower taxonomy exists to convey human-intelligible information about its instances in a non-redundant form. This criterion tells us to remove classes that add no information over upper taxonomy classes, to eliminate links in the taxonomy that are redundant due to transitivity, to merge classes that are hard to distinguish for the uninitiated user, and to remove classes that are not populated.\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024},{\"id\":454846581356710430,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":2,\"chunk_text\":\"# 3.2 Upper Taxonomy\\nTop-level classes. As for YAGO 4 [47], we start with the taxonomy of Schema.org. It defines one top-level class, schema:Thing with 11 subclasses. We exclude the class Action $^6$ , which models mainly Web user actions. 
We exclude BioChemEntity and MedicalEntity because these require expert knowledge to model correctly, which we currently do not have in our team. This leaves us with 8 top-level classes ( CreativeWork, Event, Organization, Taxon, Person, Place, Product, Intangible ), which we accept as subclasses of Thing . All top-level classes are declared disjoint (except places/organizations, and products/creative works).  \\n\\nFictional entities. To deal with fictional entities, we add a class yago:FictionalEntity as a subclass of schema:Thing . This class defines properties such as yago:createdBy and yago:appearsIn , thus justifying its existence as a class under Design Principle 1. Fictional entities are not disjoint with any other class, as anything can also exist in fiction. A fictional entity is an instance of both yago:FictionalEntity and the class that it belongs to in fiction. For example, a fictional human is an instance of both yago:FictionalEntity and schema:Person . This has the disadvantage that fictional humans will be counted as a humans in count queries. However, it has the advantage that one can easily reason on fictional humans, as they will share all the properties that we declared for schema:Person . Wikidata goes a different way, by recreating the entire class hierarchy with its properties also for fictional beings, mapping each class of fictional entities to its real-world class counterpart. This choice can for sure be defended, but for YAGO, it would have severely convoluted the schema: It would have required duplication of all class and property specifications. The current modelization already has an advantage over the modelization in previous versions of YAGO (which simply merged real and fictional entities), as well as over other top-level ontologies such as DOLCE, BFO, and SUMO (which do not model fictional entities at all).  \\n\\nIntangibles. 
For our new YAGO, we added the following classes that are not in Schema.org, but that are necessary to define the ranges of properties (under Design Principle 2 above): yago:Award ,yago:Gender (which differs from schema:GenderType in that it allows more than two values), and yago:BeliefSystem (for religious adherence). All are subclasses of schema:Intangible . The other subclasses of Intangible that Schema.org defines are mostly Web-specific (e.g., ActionAccessSpecification ). Since these classes would not have instances, let alone populated properties from Wikidata, we removed them under Design Principle 2.  \\n\\nSchema.org has a subclass Occupation of Intangible , and models occupations by the property hasOccupation . However, if we model occupations by a property, (1) we lose the class hierarchy of occupations ( Physicist subClassOf Scientist etc.), and (2) we lose the ability to add properties to specific professions (such as the doctoral advisor for scientists). Hence, by Design Principle 1 above, we model professions rather as subclasses of Person .  \\n\\nPlaces. When it comes to places, the taxonomy of Schema.org is heavily oriented towards the annotation of Web pages, with subclasses such as Accommodation ,Residence ,LocalBusiness , etc. These classes do not define properties that we could populate from Wikidata, and therefore, we remove them under Design Principle 2. We then manually created a taxonomy of subclasses of schema:Place ,which distinguishes schema:Landform (areas with a boundary given by nature), schema:AdministrativeArea (boundary given by human administration) and the newly created yago:HumanMadePlace (boundaries given by human physical construction) and yago:AstronomicalObject (with boundaries in space). For the former, we add a subclass yago:Way , which regroups all ways of transit (roads, canals, railway lines, etc.).  \\n\\nGeneral considerations. 
Under Design Principle 2, we keep only those classes from Schema.org that add new properties (plus their super-classes, all the way up to schema:Thing ). This results in 41 upper classes. As in YAGO 4, all of the above is expressed as SHACL constraints $^7$ on the classes of Schema.org. Thus, there is no special syntax, code, or formalism for these declarations, and they are all part of the YAGO KB as normal facts.\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024},{\"id\":454846581388692000,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":3,\"chunk_text\":\"# 3.3 Lower Taxonomy\\nMapping to Wikidata. The lower levels of the YAGO taxonomy come from Wikidata. As in YAGO 4, each class in the YAGO upper taxonomy is manually mapped to one or more classes in Wikidata. This happens, likewise, in a fully declarative way with a simple RDF statement that links the Schema.org-class by a special predicate to the Wikidata class(es). The mapping can happen at any level of the upper taxonomy: general classes such as Organization are mapped to Wikidata, and more special classes such as Corporation are mapped as well. A mapping can give rise to the following constellations:  \\n\\nOne-to-one mapping. One upper class is mapped to one Wikidata class. All of the subclasses of the Wikidata class are glued under the upper class, but its super-classes are not imported.  \\n\\nOne-to-many mapping. One upper class is mapped to several Wikidata classes. Again, all subclasses of these are glued under the upper class. This has the effect of merging the Wikidata classes. We do this, e.g., for classes that are equivalent for our purposes (such as geographical region (Q82794) and geographical area (Q3622002)).  \\n\\nOne-to-none mapping. One upper class is mapped to no Wikidata class – only its subclasses are mapped to Wikidata. 
This has the effect that there cannot be direct instances of the upper class. Nor can there be subclasses other than the ones we declared. This is a new mechanism that did not exist in YAGO 4. We use it for classes with a convoluted taxonomy in Wikidata.  \\n\\nWe use a one-to-none-mapping for the following classes:  \\n\\nschema:Thing. In YAGO 4, this class was mapped to the top-level class entity (Q35120) in Wikidata, which resulted in more than 1 million direct instances of schema:Thing in Yago. This defies Design Principle 2, because schema:Thing defines only very few properties. Hence, we now accept only entities that fall into one of the manually approved subclasses of Thing . This results in a clean top-level taxonomy, discarding meta-classes (e.g., class or metaclass of Wikidata ontology (Q21522864)), overlapping classes (e.g., geographic entity (Q27096213) and location (Q115095765)) and too specific classes (there are 6 top classes in Wikidata with less than 40 instances each, e.g., converter (Q35825432)).  \\n\\nschema:Place. The taxonomy of Wikidata for places is highly convoluted, with classes that are difficult to distinguish such as terrain (Q14524493), geographical location (Q2221906), geographical region (Q82794), geographical area (Q3622002), and location (Q115095765). Hence, we do not map schema:Place , and accept only instances of its manually designed subclasses. In doing so, we discard 2,861 classes (from 29,826, i.e., less than $1\\\\%$ ) and 137k instances (from 19M, i.e., less than $1\\\\%$ ).  \\n\\nschema:Intangible. Intangible classes in Wikidata, likewise, are highly convoluted, with classes such as class (Q5127848), process (Q3249551 and Q67518233) or role (Q4897819). Hence, here, too, we would have difficulties establishing properties to comply with Design Principle 2. Therefore, we apply a one-to-none mapping and accept only instances and subclasses of the manually declared subclasses of Intangible .  \\n\\nImporting the subclasses. 
Once the mapping has been defined manually, the subclasses of Wikidata can be imported automatically, as follows: We consider the subclass graph of Wikidata, which we construct as follows: Every entity of Wikidata that has a subClassOf relationship becomes a node in this graph. It is linked to its superclasses by the subClassOf relationship. We then iterate through all upper classes of YAGO, and whenever we hit a class that is mapped to Wikidata, we glue the entire sub-DAG of that Wikidata class under the YAGO top-level class. This approach differs from the approach in YAGO 4, where we mapped only classes with a Wikipedia article. However, this restriction limited the taxonomy to just a handful of classes per instance, which proved to be few.  \\n\\nSeveral caveats are to be respected in this merging process: As in YAGO 4, we ensure that we do not add any link that would create a loop in the taxonomy (49 loops were removed). We do not add transitive links (40k such links were removed). We do not add a link if that would make a class a transitive subclass of two top-level classes that we declared disjoint (9k links were removed). We also do not add a class that is itself mapped to YAGO upper classes to avoid duplicating the subtree.  \\n\\nExcluded classes. We exclude housekeeping classes of Wikidata that we blacklist manually: classes of Wikimedia pages, disambiguation pages, lists, and the like. For our new YAGO, we also exclude linguistic objects (such as characters (Q3241972), phrases (Q187931), numbers (Q11563), etc.), many of which are technically infinite, and would otherwise make up 700k entities in YAGO. We also remove abstract objects such as actions and occurrents, as these have rather philosophical subclasses that are of limited use for our purposes (e.g., Multi-organism process (Q22269697), etc.). The same fate is bestowed on scholarly articles. 
The addition of all obtainable scholarly articles to Wikidata was controversial $^8$ , and in YAGO they would make up of 39M entities, almost half of all entities. Hence, we decided to remove them. Finally, under Design Principle 3, we also remove all classes that do not have instances (1.3M).\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024},{\"id\":454846581421460002,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":4,\"chunk_text\":\"# 3.4 Instances\\nIdentifiers. As in YAGO 4, every instance is automatically equipped with a readable name. We use the title of the corresponding Wikipedia page as an entity identifier (as in yago:Eleanor_Roosevelt ). If there is none, or if the same Wikipedia page is used by more than one entity, we use the English label of the entity and concatenate it with the Wikidata Q-id to avoid ambiguity (as in yago:Brazilian_jiu_jitsu_competition_Q105086361 ). If there is none, we use a label that contains legal Turtle characters, concatenated with the Wikidata id (which was not done in YAGO 4). If there is no such label, we use the Wikidata id (with a YAGO-prefix for uniformity). The Turtle standard $^{9}$ allows percentage codes in local names, but many parsers (e.g., the one of Hermit $^{10}$ ) cannot deal with them. Hence, we replace all characters that are not letters or numbers by their hexadecimal Unicode, so that two identifiers that differ only in their inadmissible characters are still distinct.  \\n\\nInstances vs. Classes. Wikidata contains several items that are both instances and classes. For example, English (Q1860) is an instance of Natural Language (Q33742), as well as a subclass of Anglic (Q1346342). That makes sense, because there can be several subclasses of English , such as e.g., American English (Q7976). As per our discussion in Section 3.3, English thus becomes a class. 
The trouble is that OWL DL (the reasoning formalism we target) does not allow statements between instances and classes. Thus, it would be impossible to say that Eleanor Roosevelt spoke English. (That problem did not appear in YAGO 4, where intermediate classes were eliminated aggressively, and survived only as instances.) We solve this problem as follows: Whenever we encounter a Wikidata fact that would link an instance to a class, we create a generic instance of the class, and use it as object. In our example, we create a fact saying that Eleanor Roosevelt spoke yago:English_language_generic_instance . The intuition is that Roosevelt spoke something that is an instance of English. This mechanism is in line with Approach 2 in [30], and kicks in for awards, belief systems, academic titles, and languages.  \\n\\nThis technique works well for the objects of statements, which generally have an existential interpretation (Eleanor Roosevelt did not speak all dialects of English, but there is one that she spoke). It works less well for subjects of statements. For example, commercial products are classes in Wikidata (and rightly so, as different people can own different instances of the same product). However, if we take commercial products as classes, we cannot attach their manufacturer, date of inception, awards, etc. to them. It would be semantically wrong to attach these to a generic instance of the product, as they apply to the line of products itself. It would also be semantically wrong to create an axiom that says that all instances of the class have that property (not every single iPhone has won an award). Hence, we make every item that is a product (as identified by the manufacturer (P176) relation in Wikidata) an instance.\\n\\n# 3.5 Properties and Constraints\\nProperties and constraints work largely as in YAGO 4: For schema:Thing and each sub-class from Schema.org, we manually define the properties that Schema.org provides. 
By Design Principle 2, we keep only those properties of Schema.org that are (1) of general interest (removing specialized properties such as hasDriveThroughService ) and (2) existent in Wikidata (because otherwise they would not be populated). For the new YAGO, we added a few properties, most notably for the new top-level classes yago:Award and yago:FictionalEntity ,and also for countries (Schema.org does not contain the relation hasCapital ). An instance can have only those properties that are declared for its class or superclasses. We manually add SHACL constraints for maximum cardinality, patterns of literals (e.g., for ISBNs), and domain and range constraints. Each relation is mapped manually and declaratively to a relation in Wikidata, and populated from there (taking only the truthy facts). We extract time stamps for facts from Wikidata, and attach them to the YAGO facts in the RDF $^*$ model [13].  \\n\\nThe entire taxonomy of upper classes, the definition of properties, the accompanying SHACL constraints, and the mapping to Wikidata classes take the form of a single Turtle file. It forms an integral part of YAGO, and can be downloaded along with the rest of the KB. All of this works exactly as in YAGO 4, and we refer the interested reader to the corresponding publication.  \\n\\nDifferent from YAGO 4, the new YAGO simplifies numerical quantities: while these were previously values with a range and a unit, they are now simple literals.  
\\n\\nWe now provide a detailed Design Document that lists, explains, and justifies the design decisions for the new schema on the website of YAGO 4.5.\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024},{\"id\":454846581461568036,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":5,\"chunk_text\":\"# 4 Implementing YAGO\\nParsing, analyzing, and transforming a KB of the size of Wikidata (766 GB as of April 2023) is no easy feat. We describe here the challenges we encountered when creating YAGO 4.5, and how we surmounted them, in the hope that this will be useful for other users of Wikidata and YAGO.  \\n\\nInfrastructure. The code of YAGO 4 was written in Rust. While this ensured high performance and compile-time flagging of code problems, it also provided a formidable hurdle to other project members (most notably the project leader), and we have therefore rewritten the code from scratch in Python. The original YAGO 4 loaded the data into a RocksDB key-value store. This had the advantage that many costly operations (such as constraint checks) could be run directly on the data store. At the same time, loading the entire data into the data store and indexing it could easily take a day. The new system, therefore, stores all data (intermediate and final) in files on the hard drive, which has the advantage that intermediate results can be inspected and re-used.  \\n\\nData formats. Wikidata exists in the “full” version and the “truthy” version 11 .While the truthy version is much smaller, we need the full version to extract time stamps for facts (a feature that YAGO had since Version 2 [14]). One has the choice between the NT format (easier to parse) and the Turtle file (smaller by a factor of 2), and our choice falls on the latter. The file can be downloaded as BZ2 and as GZIP, and we strongly recommend the GZIP version. 
First, the unpacking is much faster (in the order of hours instead of the order of days in our case). Second, BZ2 allows no way of seeing the compressed file size without unpacking it. Finally, GZIP files can be processed sequentially by Python without unpacking them. For parsing Wikidata, we experimented with RDFlib. However, the library failed for certain characters in URIs, which caused an unrecoverable abortion in the middle of the parsing. Furthermore, the generation of URIs (expanding the prefixes) causes a large overhead. Therefore, we wrote our own Turtle parser and graph database, which, for our limited application, turned out to be rather simple (500 lines of code). These design choices mean that our code does not use any external libraries.  \\n\\nWhile the initial input (Wikidata and schema.org) is in Turtle, we chose TSV as our intermediate file format, because it allows for much faster parsing. In addition, we can attach more information to each fact (time stamp, source, etc.) in the form of supplementary columns.  \\n\\nData processing. Our system proceeds in 6 sequential steps. Each step reads the output files of the previous step, and produces new output files, as in [5]. Each of these steps can be run on its own, and each of the steps has its own set of test input files with gold-standard output files, which we can use to check if the step works as expected.  \\n\\nTwo of our steps need to process the entire Wikidata file, and this is done in a parallelized fashion. We experimented with Python multithreading, only to find that, due to the Global Interpreter Lock, it does not fully utilize multiple processor cores for CPU-bound tasks. The correct construction is multiprocessing, which uses one processor per process. Since processes cannot efficiently share data, each process has to load a copy of the data that it needs. Each process writes out its results to its own temporary file, which we then merge with the others. 
Each process $i$ of the $n$ processes starts at position $(i-1)/n\\\\times N$ of the Wikidata file (where $N$ is the size of the file). From that position on, the process scrolls forward to the next item declaration, where it starts its work. It proceeds until it hits the item declaration that follows position $i/n\\\\times N$ . Since the file is UTF-8 encoded, the initial position may hit the middle of a character that is encoded by more than one byte. This is not a problem because the UTF-8 standard can distinguish the middle-bytes from the initial bytes in an encoded stream.  \\n\\nSteps. We generate YAGO on a Unix machine with 90 CPUs and 800GB of RAM. We proceed in 6 steps:  \\n\\n1. Create schema: The manual definition of the schema is loaded, and the relevant parts of the Schema.org-taxonomy are extracted, as described in Section 3.2. This process operates only on manually defined files and hence terminates in less than a second on our machine.   \\n2. Create taxonomy: Wikidata is parsed for classes, and a loop-free taxonomy is constructed, as described in Section 3.3. This step is parallelized and takes 4 hours.   \\n3. Create facts: Wikidata is parsed for facts, each predicate is mapped to a YAGO predicate as described in Section 3.5, the subject of the fact is typechecked, and the objects are type checked if they are literals. The objects that are not literals cannot yet be type checked because we do not yet have a complete list of all instances at this stage. This step, likewise, is parallelized and takes 4 hours.   \\n4. Type-check facts: The previous step has given us a list of facts, which also contains for each instance the class that it belongs to. We load this list into memory and run through all facts to type-check the object of each fact. This step runs on YAGO data, and not on Wikidata, and thus does not need parallelization. It runs in 1:30h.   \\n5. 
Create ids: Among those facts that survived the type check, we map each entity to its legible YAGO name, as described in Section 3.5. This takes 1h.   \\n6. Create statistics: Debugging and testing are an integral part of the development. The last step counts the number of instances per class and of facts per predicate. It creates a visualization of the taxonomy and a random selection of entities for manual check. This process also takes one hour.  \\n\\nThus, the overall process takes about 12 hours.\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024},{\"id\":454846581496957478,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":6,\"chunk_text\":\"# 5 Result\\nTable 1. Size of YAGO 4.5. Facts exclude type ,label ,comment ,alternateName ,sameAs , and mainEntityOfPage facts. In brackets: without redundant properties, properties describing literals, and properties describing scholarly articles.   \\n\\n\\n<html><body><table><tr><td></td><td>Wikidata</td><td>YAGO4</td><td>YAGO4.5</td></tr><tr><td>Individuals</td><td>103M</td><td>67M (37M)</td><td>49M</td></tr><tr><td>of which generic</td><td>0</td><td>0</td><td>62k</td></tr><tr><td>Classes</td><td>2.8M</td><td>10k</td><td>132k</td></tr><tr><td>Predicates</td><td>11k</td><td>140 (124)</td><td>112</td></tr><tr><td>Facts</td><td>500M</td><td>343M (89M)</td><td>109M</td></tr><tr><td>Type facts</td><td>106M</td><td>70M (33M)</td><td>51M</td></tr><tr><td>Labelfacts</td><td>795M</td><td>303M</td><td>468M</td></tr><tr><td>Metafacts</td><td>12M</td><td>2.5M</td><td>7M</td></tr><tr><td>Dump size</td><td>766GB</td><td>280GB</td><td>109GB</td></tr></table></body></html>  \\n\\nSize. Table 1 shows the statistics of YAGO 4.5 and puts them in perspective with Wikidata and YAGO 4. YAGO 4.5 has fewer properties than YAGO 4. 
This is due to the removal of inverse properties, which we removed under Design Principle 1 (e.g., we removed hasParent because we already have hasChild ; 6 such cases); properties related to scholarly articles ( citation etc., 4 in total, which we removed according to the discussion in Section 3.3); biochemical properties (11 in total, removed because of a lack of expertise in our team); and properties of numerical literals ( value etc., 6 in total, removed because literals became simple values in YAGO 4.5). The loss is thus deliberate 12 . The same goes for the facts of YAGO 4: 238M facts describe scholarly articles (mainly citations, pagination, and publication dates), 13M facts describe literals, and 2.5M facts are redundant because of an inverse property. If these are discarded, the new YAGO contains slightly more facts. The dump size is still smaller, because we use the Turtle file format instead of NT. Concerning the classes, the picture is as rosy as intended: YAGO 4.5 has vastly more classes.  \\n\\nQuality. To evaluate the quality of the new KB, we draw inspiration from the criteria for ontology evaluation that a recent survey identified [51]: Consistency refers to the absence of logical contradictions. $^{13}$ Complexity refers to the extent to which the ontology is complicated. We propose to measure it (1) by the number of top-level classes (i.e., the number of direct subclasses of Thing ; the fewer the better), and (2) by the average number of paths from an instance to the root in the taxonomic tree (the fewer the better). Modularity is the degree to which the ontology is composed of discrete subsets. In our case, these subsets are the disjoint classes, and we report the number of (non-redundant and actively enforced) class disjointness statements as an indicator. 
Conciseness requires the absence of redundancies, and we count the number of taxonomic loops, taxonomic links that are redundant because of transitivity, and relations whose inverse also exists. Understandability is the degree to which the ontology can be comprehended. This is difficult to operationalize, but we can at least report the percentage of identifiers that have human-readable names. Coverage refers to the degree that the ontology covers the domain knowledge. We report the number of classes and facts per instance (excluding labels etc.).  \\n\\nTable 2 shows that Wikidata has vastly more facts per instance than YAGO. This is to be expected, as YAGO is a clean subset of Wikidata. YAGO 4, too, has more facts per instance than YAGO 4.5. However, this is mainly due to the 174M facts about citations for 40M scholarly articles. YAGO 4 also has a lower number of paths to the root, which is due to its sparse taxonomy. On all other measures, YAGO 4.5 scores much better than both YAGO 4 and Wikidata: It has a clean upper level taxonomy of just 9 top-level classes – instead of the dozens of Wikidata or the thousands that YAGO 4 attached to the taxonomic root for lack of good intermediate classes. YAGO 4.5 is also free of all types of redundancy that plagued Wikidata and YAGO 4. There are just 2.4 paths to the taxonomic root on average for an individual – instead of the dozens in Wikidata. At the same time, YAGO 4.5 nearly replicates the taxonomic depth of Wikidata, with 8 classes per individual – twice as many as in YAGO 4.  \\n\\nTable 2. 
Quality measures, inspired by [51]   \\n\\n\\n<html><body><table><tr><td>Criterion</td><td>Operationalization</td><td>Wikidata</td><td>YAGO4</td><td>YAGO 4.5</td></tr><tr><td>Consistency</td><td>Absenceofcontradictions</td><td>no</td><td>yes</td><td>yes</td></tr><tr><td>Complexity</td><td>Top-level classes</td><td>41</td><td>2714</td><td>6</td></tr><tr><td></td><td>Paths to root</td><td>35</td><td>1.1</td><td>2.4</td></tr><tr><td>Modularity</td><td>Disjointness axioms</td><td>0</td><td>18</td><td>24</td></tr><tr><td>Conciseness</td><td>Taxonomic loops</td><td>62</td><td>0</td><td>0</td></tr><tr><td></td><td>Redundant taxonomiclinks</td><td>377k</td><td>1216</td><td>0</td></tr><tr><td></td><td>Redundantrelations</td><td>118</td><td>6</td><td>0</td></tr><tr><td></td><td>Classeswithoutinstances</td><td>2.6M</td><td>73</td><td>0</td></tr><tr><td>Understandability</td><td>Human-readable names</td><td>0%</td><td>89%</td><td>96%</td></tr><tr><td>Coverage</td><td>Classes per instance</td><td>8.4</td><td>3.6</td><td>8</td></tr><tr><td></td><td>Facts per instance</td><td>4.8</td><td>5.1</td><td>2.6</td></tr></table></body></html>  \\n\\nData Format. The file format of YAGO is Turtle. We separate the subject, predicate, object, and dot by a tabulator, so that our files are de facto also TSV files. YAGO is split into the following files:  \\n\\n– Schema : Upper taxonomy, property definitions, and SHACL constraints.   \\n– Taxonomy : The entire taxonomy of YAGO (all subClassOf facts).   \\n– Facts : All facts about entities that have an English Wikipedia page.   \\n– Beyond Wikipedia : All facts about other entities.   \\n– Meta : All temporal annotations (in the RDF\\\\* file format [13]).  \\n\\nDownstream applications can load only the files they need, and exclude, e.g., meta facts or facts about entities that do not have an English Wikipedia page.  \\n\\nApplications. The YAGO KB has found quite a number of applications in the past [37]. 
As every user is free to download the KB and to use it (or not), we do not have an overview of the projects that use YAGO. We just know that the KB (in various versions) has been downloaded about 6000 times during the past year (heuristically excluding bots). We expect our new YAGO to be even more useful, as it provides fine-grained information about instances by the new classes. For example, whereas the singer Elvis Presley was in a mere 4 classes in YAGO 4 (Thing, Person, Human, Twin), our new taxonomy tells us also that he was an actor, a singer, a film person, and an artist.\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024},{\"id\":454846581528152616,\"paper_id\":\"64e6d5bd3fda6d7f0652c734\",\"paper_title\":\"YAGO 4.5: A Large and Clean Knowledge Base with a Rich Taxonomy\",\"chunk_id\":7,\"chunk_text\":\"# 6 Conclusion\\nIn this paper, we have presented a method to merge the Wikidata taxonomy with the taxonomy of Schema.org, giving rise to a new version of the YAGO KB, YAGO 4.5. Our work adds a rich layer of informative classes to YAGO, while at the same time keeping YAGO logically consistent.  \\n\\nSeveral open challenges remain: First, we did not include the classes of BioChemEntities and MedicalEntities for lack of domain-specific expertise. These can be added in the future. Now that the general framework is in place, more upper classes and properties can be defined. An interesting avenue of research in this direction would be the (automated) translation of textual class descriptions (such as de facto embassy (Q5244910)) into logical properties and constraints [19]. This would require a better understanding of commonsense statements [33]. A more fundamental challenge is the treatment of classes as subjects of properties. 
We would like to say, e.g., that cats eat mice (which is a general statement on cats, which is why we currently model cat (Q146) as an instance), and that Garfield (Q767120) is a cat (which requires cat to be a class). These are exciting challenges for the Semantic Web community as a whole.  \\n\\nResource Availability Statement: The Web page of YAGO 4.5 is https://yago-knowledge.org It contains download links for the data, the Design Document, the documentation, the list of publications, and contributors. The Web page also offers an interactive browser for the KB. The SPARQL endpoint is at https://yago-knowledge.org/sparql . The URIs of YAGO 4.5 are dereferenceable. YAGO 4.5 is available with Creative Commons Attribution-ShareAlike License (as imposed by the license of schema.org). The source code of YAGO 4.5 is available via Github at https://github.com/yago-naga/yago-4.5 under a Creative Commons Attribution license.  \\n\\nAcknowledgement. This work was partially funded by the grant ANR-20- CHIA-0012-01 (“NoRDF”).\",\"original_filename\":\"Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db\",\"year\":2024}]]\n"
     ]
    }
   ],
   "source": [
    "params = {\n",
    "    \"title\": \"yolo\",\n",
    "    \"top_k\": 2,\n",
    "}\n",
    "\n",
    "\n",
    "result = db.query_by_title_like(params)\n",
    "print(result)\n",
    "# 会返回两篇文章的全文"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5 关键词查找论文ID和标题"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[['65c19b5a939a5f40825fa482',\n",
      "  'SGS-SLAM: Semantic Gaussian Splatting for Neural Dense SLAM'],\n",
      " ['6434cfd690e50fcafd7a446e',\n",
      "  'Point-SLAM: Dense Neural Point Cloud-based SLAM'],\n",
      " ['655c17ef939a5f4082c42c65', 'Implicit Event-RGBD Neural SLAM'],\n",
      " ['655c1a2b939a5f4082c58efa',\n",
      "  'GS-SLAM: Dense Visual SLAM with 3D Gaussian Splatting'],\n",
      " ['6566b085939a5f40827a96db',\n",
      "  'Photo-SLAM: Real-time Simultaneous Localization and Photorealistic Mapping '\n",
      "  'for Monocular, Stereo, and RGB-D Cameras'],\n",
      " ['65cec269939a5f40828f2c0c',\n",
      "  'Loopy-SLAM: Dense Neural SLAM with Loop Closures'],\n",
      " ['65fc055e13fb2c6cf6df2527',\n",
      "  'From Variance to Veracity: Unbundling and Mitigating Gradient Variance in '\n",
      "  'Differentiable Bundle Adjustment Layers']]\n"
     ]
    }
   ],
   "source": [
    "params = {\n",
    "    'keyword': \"SLAM\",\n",
    "}\n",
    "result = db.query_by_keyword(params)\n",
    "pprint(result)\n",
    "# 查找关键词时需要明确大小写"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6 论文ID查找全文"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\"# SGS-SLAM: S EMANTIC GAUSSIAN SPLATTING FOR NEURAL DENSE SLAM  \\n\\nA P REPRINT  \\n\\n# Mingrui Li ∗  \\n\\nDepartment of Computer Science Dalian University of Technology 2905450254@mail.dlut.edu.cn  \\n\\nShuhong Liu ∗Department of Information Science and Technology The University of Tokyo s-liu@isi.imi.i.u-tokyo.ac.jp  \\n\\nHeng Zhou Department of Mechanical Engineering Columbia University henryzhou998@gmail.com  \\n\\nFebruary 6, 2024  \\n\\n# A BSTRACT  \\n\\nSemantic understanding plays a crucial role in Dense Simultaneous Localization and Mapping (SLAM), facilitating comprehensive scene interpretation. Recent advancements that integrate Gaussian Splatting into SLAM systems have demonstrated its effectiveness in generating high-quality renderings through the use of explicit 3D Gaussian representations. Building on this progress, we propose SGS-SLAM, the first semantic dense visual SLAM system grounded in 3D Gaussians, which provides precise 3D semantic segmentation alongside high-fidelity reconstructions. Specifically, we propose to employ multi-channel optimization during the mapping process, integrating appearance, geometric, and semantic constraints with key-frame optimization to enhance reconstruction quality. Extensive experiments demonstrate that SGS-SLAM delivers state-of-the-art performance in camera pose estimation, map reconstruction, and semantic segmentation, outperforming existing methods meanwhile preserving real-time rendering ability.  \\n\\nKeywords SLAM $\\\\cdot$ 3D Reconstruction $\\\\cdot$ 3D Semantic Segmentation  \\n\\n# 1 Introduction  \\n\\nDense Visual Simultaneous Localization and Mapping (SLAM) is a crucial problem in the field of computer vision. It aims to reconstruct a dense 3D map in an unseen environment while simultaneously tracking the camera poses in a real-time manner. Traditional visual SLAM systems Davison et al. [2007], Newcombe et al. [2011], Salas-Moreno et al. [2013], Mur-Artal et al. 
[2015] have yielded notable achievements in the field of sparse reconstruction, but fall short in effectively representing dense reconstruction through point clouds or voxels. To extract dense geometric information for high-fidelity representation, learning-based SLAM methods Bloesch et al. [2018], Sucar et al. [2020] have gained wild attention. They demonstrate proficiency in generating decent global 3D maps meanwhile exhibiting robustness on noises and outliers. In addition, drawing inspiration from the advancements in the neural radiance field (NeRF) Mildenhall et al. [2021], NeRF-based SLAM approaches Sucar et al. [2021], Zhu et al. [2022], Kong et al. [2023], Zhang et al. [2023], Li et al. [2023a], Wang et al. [2023] have made further progress. They excel in producing accurate and high-fidelity global reconstruction by capturing dense photometric information through differentiable rendering.  \\n\\n  \\nFigure 1: The illustration of the proposed SGS-SLAM. It employs multi-channel 2D inputs encompassing appearance, geometry, and semantic information, leveraging Gaussian Splatting and differentiable rendering for joint parameter optimization. SGS-SLAM delivers accurate 3D semantic mapping along with high-fidelity map reconstructions.  \\n\\nHowever, NeRF-based SLAM methods employ multi-layer perceptrons (MLPs) as the implicit neural representation of scenes, which introduces several challenging limitations. Primarily, MLP models struggle with over-smoothing issues at the edges of objects, leading to a lack of fine-grained details in the map. This challenge also brings difficulties on disentangling the representation of objects, making it non-trivial to segment, edit, and manipulate objects within the scene. Moreover, when applied to larger scenes, MLP models are prone to catastrophic forgetting. This means that incorporating new scenes can adversely affect the precision of previously learned models, thereby reducing overall performance. 
Additionally, NeRF-based methods are computationally inefficient. Since the entire scene is modeled through one or several MLPs, it necessitates extensive model tuning for adding or updating scenes.  \\n\\nIn this context, as opposite to NeRF-based neural representation, our exploration shifts towards the volumetric representation based on the 3D Gaussian Radiance Field Kerbl et al. [2023]. This approach marks a significant shift and offers notable advantages in the scene representation.  \\n\\nBenefits from its rasterization of 3D primitives, Gaussian Splatting exhibits remarkably fast rendering speeds and allows direct gradient flow to each Gaussian’s parameters. This results in an almost linear projection between the dense photometric loss and parameters during optimization, unlike the hierarchical pixel-sampling and indirect gradient flow through multiple non-linear layers seen in NeRF models. Moreover, the direct projection capability simplifies the addition of new parameters to the Gaussian field as separate channels, thereby enabling dynamic multi-channel rendering. Crucially, we integrate a semantic map into the 3D Gaussian field, essential for applications in robotics and mixed-reality, which allows for real-time switching between color, depth, and semantic color rendering. This stands in contrast to NeRF-based methods, which require the training of additional models and extra feature fusion for rendering when facing new input channels.  \\n\\nWhen compared with neural implicit semantic SLAM systems, such as DNS-SLAM Li et al. [2023a] and SNI-SLAM Zhu et al. [2023], our system demonstrates remarkable superiority in terms of rendering speed, scene precision, and segmentation accuracy. Leveraging these benefits, our method enable precise editing and manipulation of specific scene elements while preserving the high fidelity of the overall rendering. 
Furthermore, the utilization of explicit spatial and semantic information for identifying scene content can be instrumental in optimizing camera tracking. Particularly, we incorporate the adjustment based on geometric and semantic criteria for camera pose estimation, and utilize semantic constraints in the key-frames selection which relies on recognizing objects that have been previously observed in the trajectory.  \\n\\nOverall, our work presents several key contributions, summarized as follows:  \\n\\n•We introduce SGS-SLAM, a dense semantic SLAM system utilizing 3D Gaussians. SGS-SLAM employs an explicit volumetric representation, enabling swift and real-time camera tracking and scene mapping. More importantly, it utilizes 2D semantic maps to learn 3D semantic representations expressed by Gaussians. Compared with previous NeRF-based methods which offers over-smooth object edges, SGS-SLAM provides similar to grounth-truth level segmentation precision.   \\n•In SGS-SLAM, semantic maps provide additional supervision for optimizing parameters and selecting key frames. We employ a multi-channel parameter optimization strategy where appearance, geometric, and semantic signals collectively contribute to camera tracking and scene reconstruction. Furthermore, SGSSLAM utilizes these diverse channels for the key-frame selection during the tracking phase, concentrating on actively recognizing objects seen earlier in the trajectory. This approach results in efficient and high-quality map reconstruction based on the chosen key frames.   \\n•Utilizing semantic representation, SGS-SLAM provides a highly accurate disentangled object representation in 3D scenes, laying a solid foundation for downstream tasks such as scene editing and manipulation. SGS-SLAM facilitates the dynamic moving, rotating, or removal of objects in the map in real-time. This is achieved by identifying the semantic labels of the objects, while ensuring the rest of the scene remains unchanged and stable.  
\\n\\nExtensive experiments are conducted on both synthetic and real-world scene benchmarks. These experiments compare our method against both NeRF-based implicit approaches and novel 3D-Gaussian-based methods, evaluating performance in mapping, tracking, and semantic segmentation.  \\n\\n# 2 Related Work  \\n\\n# 2.1 Semantic SLAM  \\n\\nSemantic information is of great importance for SLAM systems Mur-Artal et al. [2015], He et al. [2023], Whelan et al. [2015], Qin et al. [2018], which is a crucial requirement for applications in robotics and VR/AR fields. Real-time dense semantic SLAM systems Salas-Moreno et al. [2013], Bloesch et al. [2018], Rosinol et al. [2020] can integrate semantic information into 3D geometric representations. Traditional semantic SLAM systems rely on explicit 3D semantic expressions, such as voxel Hermans et al. [2014], point cloud Narita et al., and signed distance field Narita et al.. These methods have limitations in terms of reconstruction speed, high-fidelity model acquisition, and memory usage. Moreover, traditional methods cannot reasonably infer unknown areas.  \\n\\n# 2.2 Neural Implicit SLAM  \\n\\nMethods based on NeRF McCormac et al. [2018], which handle complex topological structures and differentiable scene representation methods, have garnered significant attention, leading to the development of neural implicit SLAM methods Li et al. [2023b], Chung et al. [2023]. iMAP Sucar et al. [2021] uses a single MLP for scene representation, which is limited in large-scale scenes. NICE-SLAM Zhu et al. [2022] uses pre-trained multiple MLPs for hierarchical scene representation. Co-SLAM Wang et al. [2023] combines pixel set-based keyframe tracking with One-blob encoding. Go-SLAM Zhang et al. [2023] uses Droid-SLAM front-end tracking and multi-resolution hash encoding Müller et al. [2022] for mapping while implementing loop closure detection and global optimization. However, these methods cannot utilize semantic information in maps. 
NIDS-SLAM Haghighi et al. [2023] leverages the mature front-end tracking of ORB-SLAM3 Campos et al. [2021] and Instant-NGP Müller et al. [2022] for mapping but does not optimize joint semantic features for 3D reconstruction. DNS-SLAM Li et al. [2023a] proposes a 2D semantic prior system that provides multi-view geometry constraints but does not optimize 3D reconstruction with semantic features. SNI-SLAM Zhu et al. [2023], a work parallel to ours, introduces semantic loss for geometric supervision but remains limited by the efficiency constraints of NeRF’s volume rendering.  \\n\\n# 2.3 3D Gaussian Splatting-SLAM  \\n\\nThe outstanding performance and fast rasterization capabilities of 3D Gaussian Splatting Kerbl et al. [2023] enable higher efficiency and accuracy on sparse pixel bases. However, existing 3DGS-based SLAM systems Yan et al. [2023], Keetha et al. [2023], Huang et al. [2023] lack traditional effective loop closure detection, limiting tracking accuracy and the ability to recognize semantic information in scenes. We fuse semantic features into geometry and appearance and integrate 3D semantic features during the tracking process for loop closure detection. This allows us to obtain more effective and higher-resolution scene segmentation results while maintaining real-time performance.  \\n\\n# 3 Method  \\n\\n# 3.1 Multi-Channel Gaussian Representation  \\n\\nThe scene is represented using a gaussian influence function $f(\\\\cdot)$ on the map, For simplicity, these Gaussians are isotropic, as proposed in Keetha et al. [2023]:  \\n\\n$$\\nf^{\\\\mathrm{3D}}(x)=\\\\sigma\\\\exp\\\\left(-{\\\\frac{\\\\|x-\\\\mu\\\\|^{2}}{2r^{2}}}\\\\right)\\n$$  \\n\\nHere, $\\\\sigma\\\\in[0,1]$ ind $\\\\mu\\\\in\\\\mathbb{R}^{3}$ represents the center position, and $r$ denotes the radius. Each Gaussian also carries RGB colors $c_{i}=[r_{i}\\\\;b_{i}\\\\;g_{i}]^{\\\\dot{T}}$ .  
\\n\\nIn order to optimize the parameters of Gaussians to represent the scene, we need to render the Gaussians into 2D images in a differentiable manner. We use the render from Luiten et al. [2024], providing extended functionality of rendering depth in colors. It works by splatting 3D Gaussians into the image plane by approximating the projection of the integral of the infl nce function $\\\\bar{f}(\\\\bar{\\\\cdot})$ along the depth dimension in pixel coordinate. The center of the Gaussian $\\\\mu$ , radius $r$ ,and depth d(in camera coordinates) is splatted using the standard point rendering formula:  \\n\\n$$\\n\\\\mu^{\\\\mathrm{2D}}=K\\\\frac{E_{t}\\\\mu}{d},\\\\quad r^{\\\\mathrm{2D}}=\\\\frac{l r}{d},\\\\quad d=(E_{t}\\\\mu)_{z}\\n$$  \\n\\nwhere $K$ is the camera intrinsic matrix, $E_{t}$ is the extrinsic matrix capturing the rotation and translation of the camera at frame $t$ ,$l$ is the focal length. The influence of all Gaussians on this pixel can be combined by sorting the Gaussians in depth order and performing front-to-back volume rendering using the Max volume rendering formula Max [1995]:  \\n\\n$$\\nC_{\\\\mathrm{pix}}=\\\\sum_{i=1}^{n}c_{i}f_{i,\\\\mathrm{pix}}^{\\\\mathrm{2D}}\\\\prod_{j=1}^{i-1}(1-f_{j,\\\\mathrm{pix}}^{\\\\mathrm{2D}})\\n$$  \\n\\nThe pixel-level rendered color $C_{\\\\mathrm{pix}}$ is the sum over the colors of each Gaussian $c_{i}$ and weighted by the influence function $f_{i,\\\\mathrm{pix}}^{\\\\mathrm{2D}}$ (replace the 3D means and covariance matrices with the 2D splatted versions), multiplied by an occlusion term taking into account the effect of all Gaussians in front of the current Gaussian.  \\n\\nSimilarly, the depth can be rendered as  \\n\\n$$\\nD_{\\\\mathrm{pix}}=\\\\sum_{i=1}^{n}d_{i}f_{i,\\\\mathrm{pix}}^{\\\\mathrm{2D}}\\\\prod_{j=1}^{i-1}(1-f_{j,\\\\mathrm{pix}}^{\\\\mathrm{2D}})\\n$$  \\n\\nwhere $d_{i}$ denotes the depth of each Gaussian. 
By setting $d_{i}\\\\,=\\\\,1$ , we are able to calculate a silhouette, $S i l_{\\\\mathrm{pix}}\\\\,=$ $D_{\\\\mathrm{pix}}(d_{i}=1)$ , which assists in determining whether a pixel is visible in the current view. This aspect of visibility is essential for camera pose estimation, as it relies on the current reconstructed map. Additionally, it is also employed in map reconstruction, where new Gaussians are introduced in pixels lacking sufficient information.  \\n\\nWhile acquiring 3D semantic information is challenging and usually demands extensive manual labeling, 2D semantic label is a more accessible prior. In our approach, we leverage 2D semantic labels, which are often provided in datasets or can be easily obtained using state-of-the-art methods. We assign distinct channels to the parameters of Gaussians to denote their semantic labels and colors. During the rendering process, the 2D semantic map can be rendered from the reconstructed 3D scene as follows:  \\n\\n$$\\nS_{\\\\mathrm{pix}}=\\\\sum_{i=1}^{n}s_{i}f_{i,\\\\mathrm{pix}}^{\\\\mathrm{2D}}\\\\prod_{j=1}^{i-1}(1-f_{j,\\\\mathrm{pix}}^{\\\\mathrm{2D}})\\n$$  \\n\\nwhere $s_{i}=[r_{i}\\\\ b_{i}\\\\ g_{i}]^{T}$ denotes the semantic color associated with the Gaussian. This semantic color is optimized jointly with the appearance color and depth during the mapping process.  \\n\\n# 3.2 Tracking and Mapping  \\n\\nLike previous SLAM techniques, our method can be split into two processes: tracking and mapping. Tracking process estimates camera pose of each frame while keeping the scene parameters fixed. Mapping optimizes the scene representations based on the estimated camera pose. We break down steps into the following sections and explain in details.  \\n\\n# 3.2.1 Camera Pose Estimation  \\n\\nGiven the first frame, the camera pose is set to identity and use as the reference coordinates for the following tracking and mapping procedure. 
While assessing the camera pose of an RGB-D view at a new timestep, the initial camera pose is determined by adding a displacement to the previous pose, assuming constant velocity, as $E_{t+1}=E_{t}+\\\\left(E_{t}-E_{t-1}\\\\right)$ .Following this, the current pose is iteratively refined by minimizing the tracking loss between the ground truth color $(C_{\\\\mathrm{pix}}^{G T})$ ), depth images $(D_{\\\\mathrm{pix}}^{G T})$ ), and semantic map $(S_{\\\\mathrm{pix}}^{G T})$ ) and their differentiably rendered views:  \\n\\n$$\\n{\\\\mathcal{L}}_{\\\\mathrm{tracking}}=\\\\sum_{\\\\mathrm{pix}}(S i l_{\\\\mathrm{pix}}>T_{S})(\\\\lambda_{D}|D_{\\\\mathrm{pix}}^{G T}-D_{\\\\mathrm{pix}}|+\\\\lambda_{C}|C_{\\\\mathrm{pix}}^{G T}-C_{\\\\mathrm{pix}}|+\\\\lambda_{S}|S_{\\\\mathrm{pix}}^{G T}-S_{\\\\mathrm{pix}}|)\\n$$  \\n\\nHere, only those rendered pixels with a sufficiently large silhouette are factored into the loss calculation. The threshold $T_{S}$ is designed to make use of the map that has been previously optimized and has high certainty to be visible in the current camera view.  \\n\\n# 3.2.2 Key-frames Selection and Weighting  \\n\\nDuring the tracking phase of SLAM systems, key frames are identified and stored simultaneously. These key frames, providing different views of objects, are critical for mapping to refine 3D scene reconstruction. SGS-SLAM captures and stores key frames at constant time intervals. For mapping, key frames associated with the current frame are chosen based on geometric and semantic constraints. Specifically,we randomly select pixels from the current frame and extract their corresponding Gaussians $G_{\\\\mathrm{sample}}$ in the 3D scene. These Gaussians, $G_{\\\\mathrm{sample}}$ , are then projected onto the camera views of key frames as $G_{\\\\mathrm{proj}}$ . 
The $\\\\bar{G}_{\\\\mathrm{proj}}$ are evaluated based on the geometric overlap ratio:  \\n\\n$$\\n\\\\eta=\\\\frac{1}{\\\\sum G_{\\\\mathrm{proj}}}\\\\sum_{n=i}\\\\{G_{i}|0\\\\leq w i d t h(G_{i})\\\\leq W,0\\\\leq h e i g h t(G_{i})\\\\leq H\\\\}\\n$$  \\n\\nIt represents the proportion of Gaussians captured within the camera view of the key frames. $W$ and $H$ are the width and height of the camera view. The top $K$ candidates are selected from this ranking. After the initial geometric-based selection, a second selection is conducted based on semantic criteria. We discard key frames whose semantic maps $S_{\\\\mathrm{pix}}$ are identical to the current frame’s semantic map, as indicated by a high mIoU score. This threshold intents to enhance map optimization from varying viewpoints, preferring views with low mIoU overlap. In addition, we compute an uncertainty score for each key frame, defined as $\\\\bar{\\\\mathcal{U}}(t)=\\\\bar{e}^{-\\\\tau t}$ , with $t$ representing th mp of the key frame and $\\\\tau$ being a decay coefficient. This uncertainty score is used to weight the mapping loss is that key frames with a later timestamp index carry a higher uncertainty in reconstruction due to the accumulation of L$\\\\mathcal{L}_{\\\\mathrm{mapping}}$ . The intuition behind this camera tracking errors along the trajectory.  \\n\\n# 3.2.3 Map Reconstruction  \\n\\nThe scene is modeled using Gaussians across three distinct channels: (1) their mean coordinates represent the geometric information of the scene, (2) their appearance colors depict the scene’s visual appearance, and (3) their semantic colors indicate the semantic labels of objects. These parameters across the channels are jointly optimized during the process of Gaussian densification and optimization, while the camera pose, ascertained from tracking, remains fixed.  \\n\\nStarting with the first frame, all pixels contribute to initializing the map. 
In the process of map reconstruction at a new timestep, new Gaussians are introduced to areas of the map that are either insufficiently dense or display new geometry in front of the previously estimated map. The addition of new Gaussians is regulated by applying a mask to the pixels where either (ii) the silhouette value $S i l_{\\\\mathrm{pix}}$ falls below a certain threshold, signifying a high uncertainty in visibility, or (ii) $D_{\\\\mathrm{pix}}^{G T}<<D_{\\\\mathrm{pix}}$ , indicating that the ground-truth depth is much smaller than the estimated depth, and thus suggesting the presence of new geometrical features.  \\n\\nAfter densification, the parameters of the map are optimized by minimizing the mapping loss as:  \\n\\n$$\\n\\\\mathcal{L}_{\\\\mathrm{mapping}}=\\\\mathcal{U}\\\\sum_{\\\\mathrm{pix}}\\\\lambda_{D}|D_{\\\\mathrm{pix}}^{G T}-D_{\\\\mathrm{pix}}|+\\\\lambda_{C}\\\\mathcal{L}_{C}+\\\\lambda_{S}\\\\mathcal{L}_{S}\\n$$  \\n\\nare weighted SSIM loss Kerbl et al. [2023] with respect to appearance image and semantic image:  \\n\\n$$\\n\\\\mathcal{L}(I_{\\\\mathrm{pix}})=\\\\sum_{\\\\mathrm{pix}}\\\\alpha|I_{\\\\mathrm{pix}}^{G T}-I_{\\\\mathrm{pix}}|+(1-\\\\alpha)(1-s s i m(I_{\\\\mathrm{pix}}^{G T},I_{\\\\mathrm{pix}}))\\n$$  \\n\\nHere, $\\\\lambda_{D},\\\\lambda_{C},\\\\lambda_{S}$ , and $\\\\alpha$ are predefined hyperparameters, and $\\\\boldsymbol{\\\\mathcal{U}}$ is the uncertainty score defined in section 3.2.2.  \\n\\nCompared to current NeRF-based methods, which demand complex model architectures and feature fusion strategies for the optimization of geometric, appearance, and semantic features, Gaussian representation offers a notable advantage. The complexity in NeRF-based methods primarily arises from their implicit representation of scenes, where each feature is modeled by a MLP separately, often leading to limited performance. 
In contrast, Gaussian representation, with its explicit definition of scene parameters, facilitates direct gradient flow to each parameter. This enables the joint optimization of parameters across different channels, remarkably enhancing the efficiency and effectiveness of both mapping and segmentation processes.  \\n\\n# 4 Experiment  \\n\\n# 4.1 Experimental Setup  \\n\\nDatasets We evaluate our method on both synthetic and real-world datasets. To compare with other neural implicit SLAM methods, we evaluate on 8 synthetic scenes from Replica dataset Straub et al. [2019] and real-world scenes from ScanNet Dai et al. [2017a]. The ground-truth camera pose and semantic map of Replica are offered from simulation, and ground-truth camera pose of ScanNet is generated by BundleFusion Dai et al. [2017b].  \\n\\nMetrics We use PSNR, Depth-L1 (on 2D depth map), SSIM, and LPIPS to evaluate the reconstruction quality. For the evaluation of camera pose, we adopt the average absolute trajectory error (ATE RMSE). For semantic segmentation, we calculate mIoU score.  \\n\\nBaselines We compare the tracking and mapping with state-of-the-art methods iMap Sucar et al. [2021], Vox-Fusion Yang et al. [2022], NICE-SLAM Zhu et al. [2022], Co-SLAM Wang et al. [2023], ESLAM Johari et al. [2023], and SplaTAM Keetha et al. [2023]. For semantic segmentation accuracy, we compare with NIDS-SLAM Haghighi et al. [2023], DNS-SLAM Li et al. [2023a], and SNI-SLAM Zhu et al. [2023].  \\n\\n# 4.2 Experimental Results  \\n\\nWe show quantitative measures of reconstruction quality using the Replica dataset in Table 1. Our method demonstrates state-of-the-art performance. When compared to other baseline methods, our approach attains notably superior outcomes, outperforming them by a margin of 10dB in PSNR.  \\n\\nIn Figure 2, we present the reconstruction results of three chosen scenes, where regions of interest are accentuated with boxes in various colors. 
Our method exhibits high-fidelity reconstruction outcomes. Specifically, for small, intricately textured objects like a clock, socket, books on a tea table, and a lamp, our approach shows remarkable accuracy over NeRF-based methods. This is because Gaussians are capable of representing objects with complex textures and surfaces. Furthermore, NeRF-based methods often struggle with the over-smoothing issue, resulting in blurred edges on objects. In contrast, by utilizing an explicit Gaussian representation, SGS-SLAM precisely captures objects with clear edges, irrespective of their sizes. Compared with SplaTAM, which is also a Gaussian-based model, our approach utilizes semantic information for discerning object categories, recognizing visual appearance to determine texture, and applies geometric constraints to preserve accurate shapes. This combination enables our method to achieve thorough modeling of both objects and their surrounding environment. The combination of these constraints allows SGS-SLAM to capture fine-grained details of objects, offering high-fidelity and accurate reconstruction.  \\n\\nTable 2 displays the tracking evaluation results on the Replica dataset. Our method excels in achieving the highest level of depth L1 loss (cm) and minimal ATE error, surpassing baseline methods by $70\\\\%$ in terms of depth loss and $34\\\\%$ in terms of ATE RMSE (cm). This exceptional performance can be attributed to our precise scene reconstruction, which provides finely-detailed rendering results. The high-quality rendering, in turn, contributes to accurate camera pose estimation based on the established map by preventing incorrect geometric reconstruction, which could otherwise result in inaccurate tracking outcomes. Additionally, utilizing features from different channels of Gaussians, such as geometry, appearance, and semantic information, provides multiple levels of supervision, resulting in a more robust and accurate tracking capability.  
\\n\\n  \\nFigure 2: Qualitative comparison of our method and the baselines for reconstruction across three scenes from the Replica Dataset, with key details accentuated using colorful boxes. The results demonstrate that our method delivers more high-fidelity and robust reconstructions, particularly by capturing more detailed features of the objects in the scene.  \\n\\n# 4.3 3D Semantic Segmentation  \\n\\nSGS-SLAM is the first Gaussian-based SLAM system that accomplishes simultaneous localization, reconstruction, and segmentation in a single framework. Table 3 shows a quantitative evaluation of our method in comparison to other neural semantic SLAM approaches. It’s worth noting that we only show four scenes because previous NeRF-based semantic models only reported results on these scenes. In comparison to these previous methods, SGS-SLAM demonstrates state-of-the-art performance, outperforming the initial baseline by more than $10\\\\%$ . Substantial enhancement highlights the crucial advantage of explicit Gaussian representation over implicit NeRF representation. Gaussians can precisely isolate object boundaries, resulting in highly accurate 3D scene segmentation. In contrast, NeRF-based methods often struggle to recognize individual objects and typically require complex muti-level model designs and extensive feature fusion. Our approach offers an unparalleled ability to identify 3D objects in decomposed representations, which can serve as 3D priors for tracking and mapping in future time steps, and is well-suited for further downstream tasks.  
\\n\\n<html><body><table><tr><td>Methods</td><td>Metrics</td><td>Average</td><td>Room0</td><td>Rooml</td><td>Room2</td><td>Office0</td><td>Officel</td><td>Office2</td><td>Office3</td><td>Office4</td></tr><tr><td></td><td>PSNR↑</td><td>24.42</td><td>22.12</td><td>22.47</td><td>24.52</td><td>29.07</td><td>30.34</td><td>19.66</td><td>22.23</td><td>24.94</td></tr><tr><td rowspan=\\\"3\\\">NICE-SLAM</td><td>SSIM↑</td><td>0.809</td><td>0.689</td><td>0.757</td><td>0.814</td><td>0.874</td><td>0.886</td><td>0.797</td><td>0.801</td><td>0.856</td></tr><tr><td>LPIPS↓</td><td>0.233</td><td>0.330</td><td>0.271</td><td>0.208</td><td>0.229</td><td>0.181</td><td>0.235</td><td>0.209</td><td>0.198</td></tr><tr><td>PSNR↑</td><td>24.41</td><td>22.39</td><td>22.36</td><td>23.92</td><td>27.79</td><td>29.83</td><td>20.33</td><td>23.47</td><td>25.21</td></tr><tr><td rowspan=\\\"3\\\">Vox-Fusion</td><td>SSIM↑</td><td>0.801</td><td>0.683</td><td>0.751</td><td>0.798</td><td>0.857</td><td>0.876</td><td>0.794</td><td>0.803</td><td>0.847</td></tr><tr><td>LPIPS↓</td><td>0.236</td><td>0.303</td><td>0.269</td><td>0.234</td><td>0.241</td><td>0.184</td><td>0.243</td><td>0.213</td><td>0.199</td></tr><tr><td>PSNR↑</td><td>30.24</td><td>27.27</td><td>28.45</td><td>29.06</td><td>34.14</td><td>34.87</td><td>28.43</td><td>28.76</td><td>30.91</td></tr><tr><td rowspan=\\\"3\\\">Co-SLAM</td><td>SSIM↑</td><td>0.939</td><td>0.910</td><td>0.909</td><td>0.932</td><td>0.961</td><td>0.969</td><td>0.938</td><td>0.941</td><td>0.955</td></tr><tr><td>LPIPS↓</td><td>0.252</td><td>0.324</td><td>0.294</td><td>0.266</td><td>0.209</td><td>0.196</td><td>0.258</td><td>0.229</td><td>0.236</td></tr><tr><td>PSNR↑</td><td>29.08</td><td>25.32</td><td>27.77</td><td>29.08</td><td>33.71</td><td>30.20</td><td>28.09</td><td>28.77</td><td>29.71</td></tr><tr><td 
rowspan=\\\"3\\\">ESLAM</td><td>SSIM↑</td><td>0.929</td><td>0.875</td><td>0.902</td><td>0.932</td><td>0.960</td><td>0.923</td><td>0.943</td><td>0.948</td><td>0.945</td></tr><tr><td>LPIPS↓</td><td>0.336</td><td>0.313</td><td>0.298</td><td>0.248</td><td>0.184</td><td>0.228</td><td>0.241</td><td>0.196</td><td>0.204</td></tr><tr><td>PSNR↑</td><td>34.15</td><td>32.50</td><td>34.25</td><td>35.10</td><td>38.54</td><td>39.10</td><td>31.90</td><td>30.05</td><td>31.75</td></tr><tr><td rowspan=\\\"3\\\">Ours</td><td>SSIM↑</td><td>0.973</td><td>0.976</td><td>0.978</td><td>0.982</td><td>0.984</td><td>0.982</td><td>0.965</td><td>0.966</td><td>0.949</td></tr><tr><td>LPIPS↓</td><td>0.096</td><td>0.070</td><td>0.094</td><td>0.070</td><td>0.086</td><td>0.087</td><td>0.101</td><td>0.115</td><td>0.148</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr></table></body></html>\\n\\nTable 1: Quantitative comparison of our method and the baselines in training view rendering on the Replica dataset. Our method demonstrates SOTA performances on all three metrics.  \\n\\n<html><body><table><tr><td>Methods</td><td>Depth L1↓ [cm]</td><td>ATEMean↓ [cm]</td><td>ATERMSE↓ [cm]</td><td>Param. [Mb]</td><td>Track.FPS↑ Unit</td><td>Map. 
FPS↑ Unit</td><td>SLAMFPS↑ Unit</td></tr><tr><td>iMAP</td><td>4.645</td><td>3.118</td><td>4.153</td><td>1.04</td><td>9.92</td><td>2.23</td><td>1.82</td></tr><tr><td>NICE-SLAM</td><td>1.903</td><td>1.795</td><td>2.503</td><td>12.02</td><td>13.70</td><td>0.20</td><td>0.20</td></tr><tr><td>Co-SLAM</td><td>1.513</td><td>0.935</td><td>1.059</td><td>0.26</td><td>17.24</td><td>10.20</td><td>6.41</td></tr><tr><td>ESLAM</td><td>1.180</td><td>0.520</td><td>0.630</td><td>6.79</td><td>18.11</td><td>3.62</td><td>3.02</td></tr><tr><td>Ours</td><td>0.356</td><td>0.327</td><td>0.412</td><td>0.01</td><td>5.27</td><td>3.52</td><td>2.11</td></tr></table></body></html>\\n\\nTable 2: Quantitative comparison in terms of Depth, ATE, memory usage, and FPS between our method and the baselines on the Replica dataset. The values of baselines are retrieved from Zhu et al. [2023]. Our method remarkably outperforms the baselines at Depth and ATE evaluations, and performs fairly on FPS metrics. Note that for parameter size, we only count the parameters of the model.  \\n\\n# 4.3.1Key Frame Optimization  \\n\\nIn real-world datasets, tracking errors tend to accumulate along a trajectory, making pose estimations at later timestamps less reliable. Such inaccuracies can compromise the quality of map reconstructions, negatively impacting the accuracy of a previously well-established scene. A case in point is scene0000 from the ScanNet dataset, where a objects such as bike and guitar are revisited at early and late stages in the trajectory. Key frames from later in the sequence, influenced by inaccurate camera poses, can disrupt the previously accurate reconstructions of these objects. Figure 3 illustrates the novel-view evaluation for scene0000. In comparison to SplaTAM, depicted in the upper row, our method delivers more accurate reconstruction outcomes. The bike, garbage bin, and guitar are accurately rendered, meanwhile details are preserved. 
Our method facilitates the selection of key frames based on geometric and semantic constraints, incorporating an uncertainty weighting during the optimization of selected key frames. This strategy demonstrates its effectiveness in map optimization from different views while preventing unreliable key frames with high uncertainty from significantly altering the earlier, accurately reconstructed map.
\\nFigure 3: The selected novel view evaluation of scene0000 from the ScanNet dataset. The rendered views display the reconstructed bike and guitar captured within the trajectory. Our method outperforms SplaTAM by a large margin primarily due to the integration of key-frame optimization.  \\n\\nwhere the edited Gaussians, $f_{\\\\mathrm{edit}}^{\\\\mathrm{3D}}$ , are influenced by the visibility function $\\\\mu$ , transition function $\\\\Phi_{T}$ , and the Gaussian’s semantic label $\\\\tilde{y}$ . The visibility function $\\\\mu$ determines if the Gaussians should be retained (1) or removed (0) based on $\\\\tilde{y}$ . The transition function $\\\\Phi_{T}$ applies a transformation to the Gaussian’s coordinates on selected $\\\\tilde{y}$ , enabling spatial manipulation.  \\n\\nUtilizing the decoupled scene representation, in contrast to NeRF-based approaches that demand fine-tuning of the entire network, we have the ability to choose specific objects within the scene by referencing their semantic mask while keeping the remainder of the well-trained, irrelevant environment fixed. As shown in Figure 4, for object removal, we can directly erase the Gaussians associated with the editing target, such has removing the table while preserving all the items on it. In addition, we can group objects by selecting their semantic masks and apply translation and rotation, such as moving and rotating both the table and the above objects to a different place. This editing capability requires no training or fine-tuning, making it readily available for downstream applications.  \\n\\n# 5 Conclusion  \\n\\nWe presented SGS-SLAM, the first semantic dense visual SLAM system based on the 3D Gaussian representation. 
We propose to leverage multi-channel parameter optimization, where appearance, geometric, and semantic constraints are combined to enforce highly accurate 3D semantic segmentation and high-fidelity dense map reconstruction, while effectively producing robust camera pose estimation. SGS-SLAM takes advantage of optimal key-frame optimization, resulting in reliable reconstruction quality. Extensive experiments show that our method provides state-of-the-art tracking and mapping results while maintaining rapid rendering speeds. Furthermore, the high-quality scene reconstruction and precise 3D semantic labeling generated by our system establish a strong foundation for downstream tasks such as scene editing, offering a solid prior for robotics and AR/VR applications.
Orb-slam: a versatile and accurate monocular slam system. IEEE transactions on robotics , 31(5):1147–1163, 2015.   \\nMichael Bloesch, Jan Czarnowski, Ronald Clark, Stefan Leutenegger, and Andrew J Davison. Codeslam—learning a compact, optimisable representation for dense visual slam. In Proceedings of the IEEE conference on computer vision and pattern recognition , pages 2560–2568, 2018.   \\nEdgar Sucar, Kentaro Wada, and Andrew Davison. Nodeslam: Neural object descriptors for multi-view shape reconstruction. In 2020 International Conference on 3D Vision (3DV) , pages 949–958. IEEE, 2020.   \\nBen Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM , 65(1):99–106, 2021.   \\nEdgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J Davison. imap: Implicit mapping and positioning in real-time. In Proceedings of the IEEE/CVF International Conference on Computer Vision , pages 6229–6238, 2021.   \\nZihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 12786–12796, 2022.   \\nXin Kong, Shikun Liu, Marwan Taher, and Andrew J Davison. vmap: Vectorised object mapping for neural field slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 952–961, 2023.   \\nYoumin Zhang, Fabio Tosi, Stefano Mattoccia, and Matteo Poggi. Go-slam: Global optimization for consistent 3d instant reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision , pages 3727–3737, 2023.   \\nKunyi Li, Michael Niemeyer, Nassir Navab, and Federico Tombari. Dns slam: Dense neural semantic-informed slam. arXiv preprint arXiv:2312.00204 , 2023a.   
\\nHengyi Wang, Jingwen Wang, and Lourdes Agapito. Co-slam: Joint coordinate and sparse parametric encodings for neural real-time slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition ,pages 13293–13302, 2023.   \\nBernhard Kerbl, Georgios Kopanas, Thomas Leimkühler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics , 42(4), 2023.   \\nSiting Zhu, Guangming Wang, Hermann Blum, Jiuming Liu, Liang Song, Marc Pollefeys, and Hesheng Wang. Sni-slam: Semantic neural implicit slam. arXiv preprint arXiv:2311.11016 , 2023.   \\nJiaming He, Mingrui Li, Yangyang Wang, and Hongyu Wang. Ovd-slam: An online visual slam for dynamic environments. IEEE Sensors Journal , 2023.   \\nThomas Whelan, Stefan Leutenegger, Renato Salas-Moreno, Ben Glocker, and Andrew Davison. Elasticfusion: Dense slam without a pose graph. Robotics: Science and Systems, 2015.   \\nTong Qin, Peiliang Li, and Shaojie Shen. Vins-mono: A robust and versatile monocular visual-inertial state estimator. IEEE Transactions on Robotics , 34(4):1004–1020, 2018.   \\nAntoni Rosinol, Marcus Abate, Yun Chang, and Luca Carlone. Kimera: an open-source library for real-time metricsemantic localization and mapping. In 2020 IEEE International Conference on Robotics and Automation (ICRA) ,pages 1689–1696. IEEE, 2020.   \\nAlexander Hermans, Georgios Floros, and Bastian Leibe. Dense 3d semantic mapping of indoor scenes from rgb-d images. In 2014 IEEE International Conference on Robotics and Automation (ICRA) , pages 2631–2638. IEEE, 2014.   \\nGaku Narita, Takashi Seno, Tomoya Ishikawa, and Yohsuke Kaji. Panopticfusion: Online volumetric semantic mapping at the level of stuff and things. in 2019 ieee. In RSJ International Conference on Intelligent Robots and Systems (IROS) , pages 4205–4212.   \\nJohn McCormac, Ronald Clark, Michael Bloesch, Andrew Davison, and Stefan Leutenegger. Fusion $^{++}$ : Volumetric object-level slam. 
In 2018 international conference on 3D vision (3DV) , pages 32–41. IEEE, 2018.   \\nMingrui Li, Jiaming He, Yangyang Wang, and Hongyu Wang. End-to-end rgb-d slam with multi-mlps dense neural implicit representations. IEEE Robotics and Automation Letters , 2023b.   \\nChi-Ming Chung, Yang-Che Tseng, Ya-Ching Hsu, Xiang-Qian Shi, Yun-Hung Hua, Jia-Fong Yeh, Wen-Chin Chen, Yi-Ting Chen, and Winston H Hsu. Orbeez-slam: A real-time monocular visual slam with orb features and nerfrealized mapping. In 2023 IEEE International Conference on Robotics and Automation (ICRA) , pages 9400–9406. IEEE, 2023.   \\nThomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG) , 41(4):1–15, 2022.   \\nYasaman Haghighi, Suryansh Kumar, Jean Philippe Thiran, and Luc Van Gool. Neural implicit dense semantic slam. arXiv preprint arXiv:2304.14560 , 2023.   \\nCarlos Campos, Richard Elvira, Juan J Gómez Rodríguez, José MM Montiel, and Juan D Tardós. Orb-slam3: An accurate open-source library for visual, visual–inertial, and multimap slam. IEEE Transactions on Robotics , 37(6): 1874–1890, 2021.   \\nChi Yan, Delin Qu, Dong Wang, Dan Xu, Zhigang Wang, Bin Zhao, and Xuelong Li. Gs-slam: Dense visual slam with 3d gaussian splatting. arXiv preprint arXiv:2311.11700 , 2023.   \\nNikhil Keetha, Jay Karhade, Krishna Murthy Jatavallabhula, Gengshan Yang, Sebastian Scherer, Deva Ramanan, and Jonathon Luiten. Splatam: Splat, track & map 3d gaussians for dense rgb-d slam. arXiv preprint arXiv:2312.02126 ,2023.   \\nHuajian Huang, Longwei Li, Hui Cheng, and Sai-Kit Yeung. Photo-slam: Real-time simultaneous localization and photorealistic mapping for monocular, stereo, and rgb-d cameras. arXiv preprint arXiv:2311.16728 , 2023.   \\nJonathon Luiten, Georgios Kopanas, Bastian Leibe, and Deva Ramanan. Dynamic 3d gaussians: Tracking by persistent dynamic view synthesis. In 3DV , 2024.   
\\nNelson Max. Optical models for direct volume rendering. IEEE Transactions on Visualization and Computer Graphics ,1(2):99–108, 1995.   \\nJulian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul MurArtal, Carl Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797 , 2019.   \\nAngela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition , pages 5828–5839, 2017a.   \\nAngela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG) ,36(4):1, 2017b.   \\nXingrui Yang, Hai Li, Hongjia Zhai, Yuhang Ming, Yuqian Liu, and Guofeng Zhang. Vox-fusion: Dense tracking and mapping with voxel-based neural implicit representation. In 2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR) , pages 499–507. IEEE, 2022.   \\nMohammad Mahdi Johari, Camilla Carta, and François Fleuret. Eslam: Efficient dense slam system based on hybrid representation of signed distance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 17408–17419, 2023.  \"\n"
     ]
    }
   ],
   "source": [
    "params = {\n",
    "    \"paper_id\": \"65c19b5a939a5f40825fa482\",\n",
    "}\n",
    "\n",
    "result = db.query_whole_text_by_id(params)\n",
    "# ppr(result)\n",
    "print(result)\n",
    "# 直接返回全文字符串的形式"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7 论文标题查找全文"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\"# Loopy-SLAM: Dense Neural SLAM with Loop Closures  \\n\\nLorenzo Liso 1 \\\\* Erik Sandstr¨om 1 \\\\* Vladimir Yugay 3 Luc Van Gool 1 ,,Martin R. Oswald $^{1,3}$ 1 ETH Z¨urich 2 KU Leuven 3 University of Amsterdam 4 INSAIT  \\n\\n  \\nFigure 1. Benefits of Loopy-SLAM. While Point-SLAM yields high-fidelity reconstructions it does not implement loop closure and may duplicate geometries due to drift. ESLAM is faced by the same problem due to the lack of loop closure. GO-SLAM implements loop closure, but computes rather low quality map geometry. In contrast to GO-SLAM which requires to save the entire history of input frames used for mapping to update the map after loop closures, our approach anchors the neural scene representation on points which can simply be shifted without recomputing the dense map from scratch. We show the ATE RMSE and the depth L1 re-rendering error on the mesh for the TUM-RGBD fr1 room scene.  \\n\\n# Abstract  \\n\\nNeural RGBD SLAM techniques have shown promise in dense Simultaneous Localization And Mapping (SLAM), yet face challenges such as error accumulation during camera tracking resulting in distorted maps. In response, we introduce Loopy-SLAM that globally optimizes poses and the dense 3D model. We use frame-to-model tracking using a data-driven point-based submap generation method and trigger loop closures online by performing global place recognition. Robust pose graph optimization is used to rigidly align the local submaps. As our representation is point based, map corrections can be performed efficiently without the need to store the entire history of input frames used for mapping as typically required by methods employing a grid based mapping structure. Evaluation on the synthetic Replica and real-world TUM-RGBD and ScanNet datasets demonstrate competitive or superior performance in tracking, mapping, and rendering accuracy when compared to existing dense neural RGBD SLAM methods. 
Project page: notchla.github.io/Loopy-SLAM/ .  \\n\\n# 1. Introduction  \\n\\nOnline dense 3D reconstruction of scenes with an RGBD camera has been an active area of research for years [ 12 ,34 ,35 ,46 ,69 ,75 ], and remains an open problem. Recently, several works proposed to optimize an encoder-free neural scene representation at test time [ 26 ,43 ,52 ,59 ,71 ,75 ] with the potential to improve compression, extrapolate unseen geometry, provide a more seamless stepping point towards higher level reasoning such as 3D semantic prediction and leverage strong learnable priors as well as adapt to test time constraints via online optimization. One can make the distinction between coupled [26 ,43 ,44 ,52 ,55 ,59 ,71 ,75 ] and decoupled [9 ,29 ,40 ,74 ] solutions where coupled methods use the same representation for tracking and mapping while decoupled methods use independent frameworks for each task. Currently, the decoupled methods have achieved better tracking accuracy, but the decoupling creates undesirable data redundancy and independence since the tracking is performed independently of the estimated dense map. Tracking and mapping are coupled tasks and we therefore believe they should ultimately make use of the same scene representation. On the one hand, of the coupled methods, all but the concurrent MIPS-Fusion [ 55 ] implement just frame-to-model tracking, leading to significant camera drift on noisy real-world data, with corrupted maps as a result. On the other hand, the decoupled methods all make use of multi-resolution hash grids [ 9 ,29 ,40 ,74 ] and are therefore not easily transformable for map corrections e.g. as a result of loop closure, requiring expensive gradient-based updates and storing the input frames used for mapping for this purpose. 
Point-SLAM [ 43 ] has recently shown that a neural point cloud-based representation can be used as an efficient and accurate scene representation for mapping and tracking, but struggles to robustly track on noisy real-world data. Point-based representations are especially suitable for performing map corrections e.g. as a result of loop closure as they can be transformed fast and independently of each other. To this end, we introduce Loopy-SLAM, which inherits the data-adaptive scene encoding of Point-SLAM [43 ] and extends it with loop closure to achieve globally consistent maps and accurate trajectory estimation. Our contributions include:  \\n\\n• We propose Loopy-SLAM, a dense RGBD SLAM approach which anchors neural features in point cloud submaps that grow iteratively in a data-driven manner during scene exploration. We dynamically create submaps depending on the camera motion and progressively build a pose graph between the submap keyframes. Global place recognition is used to detect loop closures online and to globally align the trajectory and the submaps with simple and efficient rigid corrections directly on the scene representation. See Fig. 1 .  \\n• We propose a direct way of implementing loop closure for dense neural SLAM that does not require any gradient updates of the scene representation or reintegration strategies, contrary to previous works   \\n• Traditionally, rigid submap registration may create visible seams in the overlapping regions. Our approach based on neural point clouds avoids this and we apply feature refinement of color and geometry at the end of the trajectory capture. We further introduce a feature fusion strategy of the submaps in the overlapping regions to avoid excessive memory usage and to improve the rendering performance.  \\n\\n# 2. Related Work  \\n\\nDense Visual SLAM and Online Mapping. 
The seminal work of TSDF Fusion [ 10 ] was the starting point for a large body of works using truncated signed distance functions (TSDF) to encode scene geometry. KinectFusion [ 34 ]was among the first to show that dense mapping and tracking using depth maps can be achieved in real-time. A selection of works improved the scalability via voxel hashing [ 20 ,35 ,37 ] and octrees [ 13 ,18 ,47 ,57 ] and pose robustness via sparse image features [ 5 ] and loop closure [6 ,12 ,28 ,46 ,69 ,72 ]. Learning-based methods have also successfully been applied to the dense mapping problem, via learned updates of TSDF values [ 64 ] or neural features [ 1 ,19 ,38 ,60 ,65 ]. A number of recent works do not need depth input and accomplish dense online reconstruction from RGB cameras only [ 4 ,7 ,23 ,33 ,45 ,48 ,53 ], but typically require camera poses as input. Lately, methods relying on test-time optimization have become popular again due to the wide adaptability of differentiable renderers for effective reprojection error minimization. For example, Neural Radiance Fields [ 30 ] inspired works for dense surface reconstruction [ 36 ,61 ] and pose estimation [ 2 ,24 ,40 ,63 ] and have matured into full dense SLAM pipelines [ 26 ,43 ,44 ,52 ,55 ,59 ,71 ,75 ,76 ], which use the same coupled scene representation for mapping and tracking. A selection of similar works choose to decouple mapping and tracking into independent pipelines to realize SLAM [ 9 ,29 ,40 ,74 ]. Though the decoupled approach seems to currently achieve better tracking (since the representations can be optimized individually for each task), mapping and tracking are inherently coupled and we therefore believe they should be treated as such. We base our work on the recent Point-SLAM [ 43 ] framework which is especially suited for loop closure as the scene representation, consisting of points, is simple to transform. 
More importantly, map corrections can be achieved without a reintegration strategy per frame as in [ 12 ,28 ,74 ] which requires storing the entire history of input frames used for mapping and is resource-demanding for larger scenes.  \\n\\nLoop Closure on Dense Maps. The majority of dense methods tackling the problem of loop closure to attain a globally consistent dense map is done by subdividing the map into pieces, oftentimes called submaps [ 3 ,6 ,8 ,12 ,15 ,17 ,20 ,21 ,27 –29 ,39 ,50 ,55 ]. The submaps usually consist of a limited number of frames which are accumulated into a map. The submaps are then rigidly registered together via approximate global bundle adjustment via pose graph optimization [ 6 ,8 ,13 ,14 ,16 ,17 ,21 ,22 ,28 ,29 ,39 ,46 ,50 ,55 ,58 ,70 ], sometimes followed by global bundle adjustment for refinement [ 6 ,12 ,46 ,56 ,70 ,72 ]. Few works deviate from this methodology by optimizing a deformation graph [ 66 ,68 ,69 ]. Specifically, ElasticFusion [ 69 ] optimizes a sparse as-rigid-as-possible deformation graph to register a temporally recent active submap against an inactive global submap. Since the active map is deformed into the inactive map, drift cannot be well tackled in the inactive map, which can lead to global map inconsistencies. We therefore also split our map into submaps and apply online pose graph optimization. Among the recent dense neural SLAM works, some apply loop closure [ 9 ,29 ,55 ,74 ]. Orbeez-SLAM [ 9 ] and NEWTON [ 29 ] use a decoupled approach by employing ORB-SLAM2 [ 32 ] as the tracking system. Orbeez-SLAM and NEWTON use multi-resolution hash grids, requiring undesirable training iterations to perform map corrections. NEWTON uses multiple local spherical hash grids akin to submaps, but they focus mostly on view synthesis. GO-SLAM [ 74 ] also uses a decoupled approach by extending DROID-SLAM [ 56 ] to the online loop closure setting and coupling it with a map via Instant-NGP [31 ]. 
Their results are impressive for tracking, but focus less on reconstruction and rendering. Furthermore, they also require training iterations to the hash grids to perform map corrections. Common for all works employing hash grids is that they require to store the entire history of input frames used for mapping to perform the map corrections. This limits their scalability. In contrast, by rigidly aligning submaps, our method is not restricted to the same degree. Concurrent to our work, MIPS-Fusion [ 55 ] is the only other work using a coupled approach with loop closure. They use MLPs which encode TSDFs to represent local submaps and perform loop closure by rigid registration of the submaps, but focus mainly on tracking and not on reconstruction nor rendering. Finally, MIPS-Fusion detects loop closures via covisibility thresholds, which does not allow for the correction of large drifts, in contrast to global place recognition e.g. via [ 42 ], which we use.  \\n\\n# 3. Method  \\n\\nThis section details our dense RGBD SLAM system. Specifically, we grow submaps of neural point clouds in a progressive manner as the scene space is explored. Frameto-model tracking alongside mapping is applied on every active submap with a direct loss formulation (Sec. 3.1 ). Based on the camera motion, we dynamically trigger new global keyframes and associated submaps. When a submap is completed, we perform global place recognition to detect potential loop closures and add the relevant edges to a pose graph which is optimized using dense surface registration constraints. To further refine the scene representation, at the end of trajectory capture, we first apply feature fusion where the submaps overlap followed by color and geometry feature refinement (Sec. 3.2 ). Fig. 2 shows an overview.  \\n\\n# 3.1. 
Neural Point Cloud-based SLAM  \\n\\nPoint cloud-based SLAM as proposed in [ 43 ] lends itself for deforming a dense scene representation upon loop closures since both geometry and appearance are locally encoded in features anchored in a point cloud. These anchor points can be continuously shifted to deform the scene without the need to compute the dense representation from scratch using the original input data. To adapt the feature point cloud representation for loop closure updates, we redefine it as a of $s\\\\in\\\\mathbb{N}$ submaps, h containing a neural point cloud $P^{s}$ with a collection of Nneural points  \\n\\n$$\\n{\\\\cal P}^{s}=\\\\{(p_{i}^{s},f_{i}^{s,g},f_{i}^{s,c})\\\\,|\\\\,i=1,\\\\ldots,N^{s}\\\\}\\\\,\\\\,\\\\,,\\n$$  \\n\\neach with a position $p_{i}^{s}\\\\in\\\\mathbb{R}^{3}$ and etric and color feature descriptor $f_{i}^{s,g}\\\\in\\\\mathbb{R}^{32}$ ∈and $f_{i}^{s,c}\\\\in\\\\mathbb{R}^{32}$ ∈respectively.  \\n\\nBuilding Submaps Progressively. Mapping and tracking are always performed on the active submap, defined as the most recently created submap. We associate the first frame of the submap as a global keyframe. The keyframe defines the pose of the submap in the global reference frame. We adopt the point adding strategy and dynamic resolution from Point-SLAM [ 43 ] and progressively grow each submap in a data dependent way to ensure efficiency and accuracy. Depth and color rendering follows [ 43 ]i.e. given a camera pose with origin $\\\\mathbf{O}$ , we sample a set of points $x_{i}$ as  \\n\\n$$\\nx_{i}=\\\\mathbf{O}+z_{i}\\\\mathbf{d},\\\\quad i\\\\in\\\\{1,\\\\ldots,M\\\\}\\\\,\\\\,,\\n$$  \\n\\nwhere $z_{i}\\\\in\\\\mathbb{R}$ is the p nt depth and $\\\\mathbf{d}\\\\in\\\\mathbb{R}^{3}$ the ray direction. 
After the points $x_{i}$ have been sampled, the occupancies $\\\\mathrm{O}_{i}$ and colors $\\\\mathbf{c}_{i}$ are decoded using MLPs as  \\n\\n$$\\n\\\\mathrm{o}_{i}=h\\\\big(x_{i},P^{s,g}(x_{i})\\\\big)\\\\qquad\\\\mathrm{}\\\\mathrm{}\\\\mathrm{~}\\\\mathbf{c}_{i}=g_{\\\\xi}\\\\big(x_{i},P^{s,c}(x_{i})\\\\big)\\\\ \\\\mathrm{~.~}\\n$$  \\n\\nHere, $P^{s,g}(x_{i})$ and $P^{s,c}(x_{i})$ denote the interpolated geometric and color features from the submap $P^{s}$ . The geometry and color decoder MLPs are denoted $h$ and $g$ . We make a small adjustment to the mapping strategy. Apart from the feature, the decoders take the 3D point $x_{i}$ as input, to which a learnable Gaussian positional encoding [ 54 ] is applied. However, while keeping the geometric MLP fixed, we allow the encoding to be optimized on the fly. At loop closure, when the points are shifted, they may not decode to the exact same value as before in their new location. Using an on-the-fly adaptive positional encoding gives the system a simple way of adjusting instead of updating the feature at each point, which is more expensive. For details on feature interpolation and rendering equations for color $\\\\hat{I}$ and depth $\\\\hat{D}$ , we refer to [ 43 ].  \\n\\nTracking and Mapping Losses. Tracking and mapping are applied in an alternating fashion on the active submap and performed equivalently to [ 43 ]. For tracking we render $M_{t}$ pixels across the RGBD frame and minimize the re-rendering loss to the sensor reading $D$ and $I$ as  \\n\\n$$\\n\\\\mathcal{L}_{\\\\mathrm{track}}=\\\\sum_{k=1}^{M_{t}}\\\\frac{|D_{k}-\\\\hat{D}_{k}|_{1}}{\\\\sqrt{\\\\hat{S}_{D}}}+\\\\lambda_{t}|I_{k}-\\\\hat{I}_{k}|_{1}\\\\,\\\\mathrm{~.~}\\n$$  \\n\\n$\\\\hat{D}$ and $\\\\hat{I}$ are the rendered depth and color, $\\\\hat{S}_{D}$ is the variance of $\\\\hat{D}$ (see [ 43 ]) and $\\\\lambda_{t}$ is a hyperparameter. 
For mapping we render $M$ pixels across the frame and minimize the loss  \\n\\n$$\\n\\\\mathcal{L}_{m a p}=\\\\sum_{k=1}^{M}\\\\lvert D_{k}-\\\\hat{D}_{k}\\\\rvert_{1}+\\\\lambda_{m}\\\\lvert I_{k}-\\\\hat{I}_{k}\\\\rvert_{1}\\\\,\\\\mathrm{~,~}\\n$$  \\n\\nwhere $\\\\lambda_{m}$ is a hyperparameter.  \\n\\nKeyframe Selection and Submap Initialization. Creating submaps too often can increase pose drift, especially for trajectories with many small loops. Instead of using a fixed interval when creating the global keyframes as in [ 8 ,12 ,27 ], we dynamically create global keyframes based on the camera motion [ 6 ,50 ]. When the rotation angle to the global keyframe of the active submap exceeds a threshold $\\\\sigma$ or the relative translation exceeds $\\\\theta$ , we create a new submap. For each new submap $P^{s}$ , to speed up the mapping process, we initialize it with the projection of the past neural point cloud submap $P^{s-1}$ into the new global keyframe. Apart from the global keyframes, we also keep local keyframes which are generated at a regular interval within each submap to constrain the mapping as in [ 43 ], but on a per-submap basis, instead of on the global scene representation. These are deleted when a new submap is initialized.  \\n\\n  \\nFigure 2. Loopy-SLAM Overview. Given an input RGBD stream, we first track the frame against the current active submap. If a new global keyframe is triggered from the estimated motion, we initialize a new submap, otherwise we continue mapping against the same submap. If a loop is detected between the just completed submap and the past global keyframes, pose graph optimization (PGO) is triggered. First, we compute the loop edge constraints (1) with a coarse to fine dense surface registration technique and then PGO (2) is performed with a robust dense surface registration objective. The poses and submaps are then rigidly corrected to achieve global pose and map alignment (3). 
Finally, the just triggered new global keyframe is added to the place recognition database.  \\n\\n# 3.2. Loop Closure and Refinement  \\n\\nGlobal place recognition is performed before starting a new submap to build edges in a pose graph. Loop closure edge constraints are computed using a coarse to fine registration strategy and the pose graph is optimized with a robust line process to reject outlier edge candidates. The output from the pose graph optimization (PGO) is a set of refined global keyframe poses which are used to correct all frame poses and map points belonging to each submap. At the end of trajectory capture, feature fusion and refinement are performed jointly on all submaps.  \\n\\nGlobal Place Recognition. To allow for the correction of arbitrary drifts we add every global keyframe to a bag of visual words (BoW) database [ 42 ] for global place recognition. Every time a global keyframe is created, it is added to the BoW database. This is in contrast to e.g. MIPSFusion [ 55 ] which detects loop closures via submap overlap, which is limited to the correction of smaller drifts.  \\n\\nPose Graph Optimization. We build a pose graph by first defining each node $T_{s}~\\\\in~\\\\mathrm{SE}(3)$ as the correction to the world coordinate pose of the global keyframe. We further tween the adjacent keyframes of submaps populate odometry edges with identity const P$P^{s}$ nts and $\\\\{I_{s}\\\\}$ P$P^{s+1}$ -.non-adjacent nodes by querying the BoW database when a Loop edge constraints $\\\\left\\\\{T_{s t}\\\\right\\\\}\\\\,\\\\in\\\\,\\\\mathrm{SE}(3)$ are added between submap has been completed. We query the top $K$ neighbors from the BoW and add them as nodes in the pose graph if the visual similarity score is higher than a dynamically computed threshold $s_{m i n}$ . The threshold $s_{m i n}$ is the minimum score between the global keyframe and the frames of the associated submap. 
PGO is triggered in an online fashion to mitigate real-time inter-submap drift, which is critical to be resolved as early as possible. We use a robust PGO strategy based on dense surface registration which filters outlier loop edges during optimization, following [ 8 ]. We choose a dense surface registration objective since it is inherently tied to the local submaps which we aim to correct, in contrast to the relative pose residual used in e.g. [32 ]. To be robust against erroneous loop edges, a line process $\\\\mathbb{L}=\\\\left\\\\{l_{s t}\\\\right\\\\}$ is added as a jointly optimized weight $(l_{s t}\\\\,\\\\in\\\\,[0,1])$ over the loop edges. We optimize the global keyframe pose corrections $\\\\mathbb{T}=\\\\{T_{s}\\\\}$ along with the loop weights $\\\\mathbb{L}$ by minimizing the objective  \\n\\n$$\\n\\\\begin{array}{r l r}{\\\\lefteqn{\\\\mathbb{E}(\\\\mathbb{T},\\\\mathbb{L})=\\\\sum_{s}f(T_{s},T_{s+1},I_{s})+\\\\lambda\\\\Big(\\\\sum_{s,t}l_{s t}f(T_{s},T_{t},T_{s t})\\\\Big.\\\\Big.}}\\\\\\\\ &{}&{\\\\displaystyle+\\\\left.\\\\mu\\\\sum_{s,t}(\\\\sqrt{l_{s t}}-1)^{2}\\\\right)\\\\ ,\\\\qquad\\\\qquad\\\\qquad(6)}\\\\end{array}\\n$$  \\n\\nwhere $\\\\lambda$ and $\\\\mu$ are hyperparameters. The dense surface registration terms $f(T_{s},T_{t},X)$ are defined as the sum of squared distances between corresponding points in submaps  \\n\\n$P^{s}$ and $P^{t}$  \\n\\n$$\\n\\\\begin{array}{r l}{\\\\lefteqn{f(T_{s},T_{t},X)=\\\\sum_{(p,q)\\\\atop p}||T_{s}p-T_{t}q||^{2}}}\\\\\\\\ &{\\\\approx\\\\sum_{p}||T_{s}p-T_{t}X p||^{2},}\\\\end{array}\\n$$  \\n\\nwhere $(p,q)$ defines the set of corresponding points. The last term in Eq. ( 6 ) is a regularizer to prevent the trivial solution. The objective is optimized with Levenberg-Marquardt. For more details, we refer to Choi et al. [8 ]. We initialize $\\\\mathbb{T}$ to identity and follow a two-stage optimization where, in a first stage, loop edges with $l_{s t}<l_{m i n}$ are removed. 
In a second stage, all remaining loop edges are used. The output from the PGO is a set of rigid correction terms $\\\\mathbb{T}$ to the global keyframe poses. We apply $\\\\mathbb{T}$ to the keyframe world coordinate poses and the frame poses associated with the submaps, as well as the submaps themselves.  \\n\\nLoop Edge Constraints. For every loop edge in the pose graph between submaps $P^{s}$ and $P^{t}$ , the constraints $T_{s t}$ need to be computed. We use a coarse to fine dense registration technique to align the source and target submaps. We found that using the neural point cloud submaps $P^{s}$ directly was inherently unstable for two reasons: 1) dense surface registration methods need uniformly drawn samples on the surface, but the submaps $P^{s}$ have a dynamic resolution, 2) the anchored points in $P^{s}$ come from a single depth observation, which may be noisy and can corrupt the surface registration. To mitigate these two effects, we suppress noise by integrating all depth frames associated with a submap with volumetric TSDF Fusion [ 10 ] and sample uniformly drawn points from the surface extracted by marching cubes [ 25 ]. Denote the point clouds extracted by volumetric fusion from submaps $P^{s}$ and $P^{t}$ as $S^{s}$ and $S^{t}$ respectively. As coarse alignment we use the global registration method of Rusu et al. [41 ] which extracts Fast Point Feature Histograms (FPFH) features for each point from down sampled versions of the source $S^{s}$ and target $S^{t}$ point clouds. Correspondence search is then performed in the FPFH feature space rather than in Euclidean 3-space. The optimization is wrapped in a RANSAC framework to reject outlier correspondences and the output is a rigid correction to the source point cloud $S^{s}$ such that it aligns with the target $S^{t}$ . To refine the estimate, ICP is used on the full resolution point clouds. 
Finally, though the PGO has built in outlier handling, we find it useful to prefilter the loop edges based on the quality of the constraints. Specifically, we find a strong correlation between the error of the constraint and the translation magnitude of the constraint. We therefore remove edges with a translation constraint magnitude above a dynamically computed threshold $t_{m i n}$ . We compute the threshold $t_{m i n}$ based on statistics from all loop constraints by using a percentile that yields a standard deviation on the remaining loop edges below a threshold $\\\\sigma_{m i n}$ . Additionally, we require that the so called fitness score, which measures the overlapping area ( #of inlier correspondences / #of points in target), to be above a threshold $f_{m i n}$ .  \\n\\nFeature Fusion and Refinement. At the end of the trajectory capture, we concatenate all submaps to a global neural point cloud from which a global 3D model can be extracted. During concatenation, we first perform feature fusion in the overlapping submap regions to compress the model. Concretely, thanks to the projective initialization strategy when creating new submaps, point correspondences between submaps come for free. Note that these neural point correspondences are not the same as those between the point cloud correspondences in Eq. ( 7 ), which are used to compute the loop edge constraints. The submaps $P^{s}$ create a chain of correspondences and corresponding points can thus exist between more than two submaps. The corresponsubmaps for each point and dences are averaged in terms of location and features com $\\\\begin{array}{r}{\\\\overline{{f}}_{i}^{c}~=~\\\\sum_{s}f_{i}^{s,c}/|s|}\\\\end{array}$ P||$\\\\begin{array}{r}{\\\\overline{{p}}_{i}=\\\\sum_{s}p_{i}^{s}/|s|}\\\\end{array}$ , where we sum over the relevant i which has correspondences. |,$\\\\begin{array}{r}{\\\\overline{{f}}_{i}^{g}=\\\\sum_{s}f_{i}^{s,g}/|s|}\\\\end{array}$ Pi.e. 
we $|s|$ |denotes the cardinality of the set of submaps we sum over.  \\n\\nAfter feature fusion, we perform a set of refinements steps on the global neural point cloud. During this step, we use the global keyframes and optimize the color and geometric features using a fixed color decoder $g_{\\\\xi}$ .  \\n\\n# 4. Experiments  \\n\\nWe describe our experimental setup and then evaluate our method against state-of-the-art dense neural RGBD SLAM methods on Replica [ 49 ] as well as the real world TUMRGBD [ 51 ] and the ScanNet [ 11 ] datasets. For more experiments and details, we refer to the supplementary material.  \\n\\nImplementation Details. For global keyframe selection we use $\\\\theta~=~0.3m$ and $\\\\sigma\\\\:=\\\\:20^{\\\\circ}$ on Replica and Scannet. On TUM-RGBD, we use $\\\\theta\\\\:=\\\\:0.45m$ and $\\\\sigma\\\\,=\\\\,30^{\\\\circ}$ .For the loop closure specific parameters, we add the top $K\\\\ =\\\\ 4$ queries from the BoW for Replica and $K\\\\ =\\\\ 1$ for ScanNet and TUM-RGBD. To pre-filter loop edges, we use $\\\\sigma_{m i n}\\\\,=\\\\,0.15$ and $f_{m i n}=0.1$ for ScanNet and TUMRGBD. For Replica, no pre-filtering is performed. To prune the loop edges, we use $l_{m i n}=0.25$ on Replica and Scannet while we use $l_{m i n}\\\\,=\\\\,0.1$ on TUM-RGBD. Following [ 8 ], we use $\\\\mu\\\\,=\\\\,0.04\\\\kappa$ , where $\\\\kappa$ is the average cardinality of set of correspondences between the two correspondence sets $\\\\kappa_{i j}$ $X S^{i}$ and $\\\\kappa_{j i}$ $S^{j}$ that are within $\\\\kappa_{i j}$ is the distance $\\\\epsilon=0.05\\\\mathrm{~m~}$ . We use $\\\\lambda\\\\,=\\\\,5$ for all experiments. For the tracking and mapping specific hyperparameters as well as meshing, we follow [ 43 ], i.e. we render depth and color every fifth frame over the estimated trajectory and use TSDF Fusion [ 10 ] with voxel size $1\\\\;\\\\mathrm{cm}$ . 
We use $\\\\lambda_{t}\\\\,=\\\\,0.5$ and $\\\\lambda_{m}=0.1$ for the color weight in the tracking and mapping loss respectively. For tracking, we sample $M_{t}=1.5K$ $\\\\scriptstyle(\\\\mathrm{K=kilo})$ pixels uniformly on Replica. On TUM-RGBD and ScanNet, we first compute the top 75K pixels based on the image gradient magnitude and sample $M_{t}=5K$ out of this set. For mapping, we sample uniformly $M=5K$ pixels for Replica and 10K pixels for TUM-RGBD and ScanNet. See the supplementary material for more details.  \\n\\n<html><body><table><tr><td>Method</td><td>LC</td><td>Rm 0</td><td>Rm 1</td><td>Rm 2</td><td>Off</td><td>Off</td><td>1 Off</td><td>2 Off 3</td><td>Off</td><td>4 Avg.</td></tr><tr><td>NICE-SLAM [75]</td><td></td><td>0.97</td><td>1.31</td><td>1.07</td><td>0.88</td><td>1.00</td><td>1.06</td><td>1.10</td><td>1.13</td><td>1.06</td></tr><tr><td>Vox-Fusion[71]</td><td></td><td>1.37</td><td>4.70</td><td>1.47</td><td>8.48</td><td>2.04</td><td>2.58</td><td>1.11</td><td>2.94</td><td>3.09</td></tr><tr><td>ESLAM[26]</td><td></td><td>0.71</td><td>0.70</td><td>0.52</td><td>0.57</td><td>0.55</td><td>0.58</td><td>0.72</td><td>0.63</td><td>0.63</td></tr><tr><td>Point-SLAM[43]</td><td></td><td>0.61</td><td>0.41</td><td>0.37</td><td>0.38</td><td>0.48</td><td>0.54</td><td>0.69</td><td>0.72</td><td>0.52</td></tr><tr><td>MIPS-Fusion[55]</td><td></td><td>1.10</td><td>1.20</td><td>1.10</td><td>0.70</td><td>0.80</td><td>1.30</td><td>2.20</td><td>1.10</td><td>1.19</td></tr><tr><td>GO-SLAM[74]</td><td></td><td>0.34</td><td>0.29</td><td>0.29</td><td>0.32</td><td>0.30</td><td>0.39</td><td>0.39</td><td>0.46</td><td>0.35</td></tr><tr><td>Ours</td><td></td><td>0.24</td><td>0.24</td><td>0.28</td><td>0.26</td><td>0.40</td><td>0.29</td><td>0.22</td><td>0.35</td><td>0.29</td></tr></table></body></html>\\n\\nTable 1. [cm]). For all but one scene, we achieve more accurate tracking Tracking Performance on Replica [ 49 ](ATE RMSE ↓than existing methods. LC indicates loop closure. 
The best results are highlighted as first , second , and third .  \\n\\nEvaluation Metrics. The meshes are extracted with marching cubes [ 25 ] and evaluated using the F-score which is the harmonic mean of the Precision and Recall. A distance threshold of $1~\\\\mathrm{cm}$ is used for all evaluations. We also provide the depth L1 metric which evaluates the depth on the mesh at random poses against its ground truth. For tracking accuracy, we use ATE RMSE [ 51 ] and for rendering we report the peak signal-to-noise ratio (PSNR), SSIM [ 62 ] and LPIPS [ 73 ]. Our rendering metrics are evaluated by rendering the full resolution image along the estimated trajectory every 5th frame. Unless otherwise written, we report the average metric of three runs.  \\n\\nDatasets. The Replica dataset [ 49 ] consists of high-quality 3D reconstructions of diverse indoor scenes. We leverage the publicly available dataset by Sucar et al. [52 ], which contains trajectories from an RGBD sensor. Additionally, we showcase our framework on real-world data using the TUM-RGBD dataset [ 51 ] and the ScanNet dataset [ 11 ]. The TUM-RGBD poses were captured utilizing an external motion capture system, while ScanNet uses poses from BundleFusion [ 12 ].  \\n\\nBaseline Methods. We primarily compare our method to existing state-of-the-art dense neural RGBD SLAM methods such as ESLAM [ 26 ], Point-SLAM [ 43 ] and GOSLAM [ 74 ]. We use the numbers from the respective papers where available. Otherwise, we reproduce them ourselves.  \\n\\n# 4.1. Reconstruction  \\n\\nFig. 3a compares our method to state-of-the-art dense RGBD neural SLAM methods in terms of the geometric reconstruction accuracy. We outperform all methods on the majority of scenes and report an average improvement of $20\\\\,\\\\%$ and $70\\\\,\\\\%$ to the second (Point-SLAM) and third (ESLAM) best performing methods on the depth L1 metric. Fig. 
3b compares the mesh reconstructions of ESLAM [ 26 ], GO-SLAM [ 74 ], Point-SLAM [ 43 ] and our method to the ground truth mesh. We find that our method is able to resolve fine details to a significant extent, even beating PointSLAM on detailed geometry (see the zoomed in visualizations). We attribute this to our online loop closure strategy which globally optimizes the poses and submaps globally. Finally, in Fig. 4 we qualitatively evaluate on ScanNet, showing improvements in geometric accuracy over ESLAM and GO-SLAM and over Point-SLAM due to more accurate pose estimates.  \\n\\n<html><body><table><tr><td>Method</td><td>LC</td><td>fr1/ fr1 / desk desk2</td><td>frl/ fr2/ room xyz</td><td></td><td>fr3/ Avg. office</td></tr><tr><td>DI-Fusion [19]</td><td>4.4</td><td>N/A</td><td>N/A</td><td>2.0 5.8</td><td>N/A</td></tr><tr><td>NICE-SLAM [75]</td><td>4.26</td><td>4.99</td><td>34.49</td><td>6.19 3.87</td><td>10.76</td></tr><tr><td>Vox-Fusion [71]</td><td>3.52</td><td>6.00</td><td>19.53</td><td>1.49 26.01</td><td>11.31</td></tr><tr><td>MIPS-Fusion [55]</td><td>3.0</td><td>N/A</td><td>N/A</td><td>1.4 4.6</td><td>N/A</td></tr><tr><td>Point-SLAM [43]</td><td>4.34</td><td>4.54</td><td>30.92</td><td>1.31 3.48</td><td>8.92</td></tr><tr><td>ESLAM [26]</td><td>2.47</td><td>3.69</td><td>29.73</td><td>1.11 2.42</td><td>7.89</td></tr><tr><td>Co-SLAM [59]</td><td>2.40</td><td>N/A</td><td>N/A</td><td>1.7 2.4</td><td>N/A</td></tr><tr><td>GO-SLAM [74]</td><td>1.5</td><td>N/A</td><td>4.64</td><td>0.6 1.3</td><td>N/A</td></tr><tr><td>Ours</td><td>3.79</td><td>3.38</td><td>7.03</td><td>1.62 3.41</td><td>3.85</td></tr><tr><td>BAD-SLAM [46]</td><td>√ 1.7</td><td>N/A</td><td>N/A</td><td>1.1 1.7</td><td>N/A</td></tr><tr><td>Kintinuous [67]</td><td>3.7</td><td>7.1</td><td>7.5</td><td>2.9 3.0</td><td>4.84</td></tr><tr><td>ORB-SLAM2[32]</td><td>1.6</td><td>2.2</td><td>4.7</td><td>0.4 1.0</td><td>1.98</td></tr><tr><td>ElasticFusion[69]</td><td>2.53</td><td>6.83</td><td>21.49</td><td>1.17 
2.52</td><td>6.91</td></tr><tr><td>BundleFusion [12]</td><td>1.6</td><td>N/A</td><td>N/A</td><td>1.1 2.2</td><td>N/A</td></tr><tr><td>Cao et al. [6]</td><td>1.5</td><td>N/A</td><td>N/A</td><td>0.6 0.9</td><td>N/A</td></tr><tr><td>Yan et al. [70]</td><td>1.6</td><td>N/A</td><td>5.1</td><td>N/A 3.1</td><td>N/A</td></tr></table></body></html>  \\n\\nTable 2. Tracking Performance on TUM-RGBD [ 51 ](ATE RMSE on a variety of scenes. On average Loopy-SLAM outperforms ex↓[cm]). Loopy-SLAM shows competitive performance isting dense neural RGBD methods (top part) that do not employ loop closure (LC), and is reducing the gap to traditional dense and sparse SLAM methods (bottom part).   \\n\\n\\n<html><body><table><tr><td>Method</td><td>00</td><td>59</td><td>106</td><td>169</td><td>181</td><td>207</td><td>54</td><td>233465</td><td></td><td>Avg.-6 Avg.-9</td><td></td></tr><tr><td>Vox-Fusion[71]</td><td>16.6</td><td>24.2</td><td>8.4</td><td>27.3</td><td>23.3</td><td>9.4</td><td></td><td></td><td></td><td>18.5</td><td></td></tr><tr><td>Co-SLAM[59]</td><td>7.1</td><td>11.1</td><td>9.4</td><td>5.9</td><td>11.8</td><td>7.1</td><td></td><td></td><td></td><td>8.8</td><td></td></tr><tr><td>MIPS-Fusion[55]</td><td>7.9</td><td>10.7</td><td>9.7</td><td>9.7</td><td>14.2</td><td>7.8</td><td></td><td></td><td></td><td>10.0</td><td></td></tr><tr><td>NICE-SLAM[75]</td><td>12.0</td><td>14.0</td><td>7.9</td><td>10.9</td><td>13.4</td><td>6.2</td><td>20.9</td><td>9.0</td><td>22.3</td><td>10.7</td><td>13.0</td></tr><tr><td>ESLAM[26]</td><td>7.3</td><td>8.5</td><td>7.5</td><td>6.5</td><td>9.0</td><td>5.7</td><td>36.3</td><td>4.3</td><td>16.5</td><td>7.4</td><td>11.3</td></tr><tr><td>Point-SLAM[43]</td><td>10.2</td><td>7.8</td><td>8.7</td><td>22.2</td><td>14.8</td><td>9.5</td><td>28.0</td><td>6.1</td><td>21.6</td><td>12.2</td><td>14.3</td></tr><tr><td>GO-SLAM[74]</td><td>5.4</td><td>7.5</td><td>7.0</td><td>7.7</td><td>6.8</td><td>6.9</td><td>8.8</td><td>4.8</td><td>8.2</td><td>6.9</td><td>7.0</td></tr
><tr><td>Ours</td><td>4.2</td><td>7.5</td><td>8.3</td><td>7.5</td><td>10.6</td><td>7.9</td><td>7.5</td><td>5.2</td><td>10.9</td><td>7.7</td><td>7.7</td></tr></table></body></html>  \\n\\n[cm]). Loopy-SLAM yields competitive performance on a variety Table 3. Tracking Performance on ScanNet [ 11 ](ATE RMSE ↓of scenes. Avg.-6 and Avg.-9 means averaging over the 6 and 9 scenes respectively.   \\n\\n\\n<html><body><table><tr><td>Metric</td><td>NICE- SLAM[75]</td><td>Vox- Fusion[71]</td><td>ESLAM [26]</td><td>Point- SLAM[43]</td><td>Ours</td></tr><tr><td>PSNR [dB]↑</td><td>24.42</td><td>24.41</td><td>27.8</td><td>35.17</td><td>35.47</td></tr><tr><td>SSIM↑</td><td>0.809</td><td>0.801</td><td>0.921</td><td>0.975</td><td>0.981</td></tr><tr><td>LPIPS√</td><td>0.233</td><td>0.236</td><td>0.245</td><td>0.124</td><td>0.109</td></tr></table></body></html>\\n\\nTable 4. Rendering Performance on Replica [ 49 ]. We marginally outperform Point-SLAM on the commonly reported rendering metrics. Otherwise, we outperform existing methods like NICE-SLAM, Vox-Fusion and ESLAM. Results are averaged over all 8 scenes.  \\n\\n  \\nFigure 3. Reconstruction Performance on Replica [ 49 ]. Fig. 3a : Our method performs better than all existing methods on average. Fig. 3b : Compared to ESLAM which uses axis aligned feature planes and GO-SLAM which uses multi-resolution hash grids, LoopySLAM has a significant advantage in terms of the accuracy of the reconstructions due to the neural point cloud of dynamic resolution. Moreover, with the pose accuracy we obtain via loop closure, we close the gap to the ground truth further. See specifically the zoomed in visualizations. ∗Depth L1 for GO-SLAM shows our reproduced results from random poses (GO-SLAM evaluates on ground truth poses).  \\n\\n  \\nFigure 4. Mesh Evaluation on ScanNet [ 11 ]. 
Loopy-SLAM yields drift free large scale reconstructions compared to Point-SLAM (scene 54, scene 181, scene 169 ) and ESLAM ( scene 54 ) and with more accurate geometry compared to GO-SLAM (all scenes) and ESLAM ( scene 54, scene 181 ). The green boxes highlight drifted or poor geometry. The red boxes show the zoomed in view locations.  \\n\\n# 4.2. Tracking  \\n\\nWe report the tracking performance on the Replica dataset in Tab. 1 . We outperform the existing methods on all scenes except one. We attribute this to robust frame-to-model local pose estimation coupled with our pose graph optimization which globally aligns the submap frames. We further show competitive performance on real-world data by evaluating on the TUM-RGBD and ScanNet datasets in Tab. 2 and Tab. 3 respectively. On both datasets, among the dense neural SLAM methods, we are competitive compared to GO-SLAM [ 74 ] while beating all other dense neural SLAM methods on average. The fr1 room and scene 54 scenes highlights the importance of incorporating loop closure - the best method without loop closure achieves an ATE RMSE of $19.53~\\\\mathrm{cm}$ on the fr1 room scene while GO-SLAM and our method can reduce this significantly. Encouragingly, Loopy-SLAM shows state-of-the art performance on scene 54 which is the only multi-room scene and the largest scene in terms of spatial extent. We show mesh evaluations on ScanNet in Fig. 4 which further emphasizes the need for online loop closure compared to ESLAM and Point-SLAM.  \\n\\n# 4.3. Rendering  \\n\\nTab. 4 compares rendering performance on the Replica dataset and shows competitive performance to PointSLAM [ 43 ], while beating NICE-SLAM [ 75 ], VoxFusion [ 71 ] and ESLAM [ 26 ]. Unfortunately, the code for rendering using GO-SLAM [ 74 ] did not work at the time of submission.  \\n\\n# 4.4. Further Statistical Evaluation  \\n\\nMemory and Runtime Analysis. Tab. 5 shows the runtime and memory footprint of our method. 
Our memory usage is competitive and we can run all our experiments on a 12 GB GPU card. GO-SLAM [ 74 ] and ESLAM [ 26 ] typically require a 24 GB card. Our mapping and tracking runtime is equivalent to Point-SLAM [ 43 ] (excluding loop closure) and fully implemented using Pytorch. On scene fr1 desk we report 7 PGOs taking on average $1\\\\ \\\\mathrm{ms/PGO}$ and requiring on average 8 registrations/PGO which on average take 12 sec/registration. We note that all registrations but the ones belonging to the active submap can be computed in parallel while mapping the active submap. The registrations to the active submap can be computed while mapping the next submap.  \\n\\nNumber of Scene Points. Tab. 6 compares the number of total neural scene points on the TUM-RGBD dataset. Loopy-SLAM yields on average $14\\\\ \\\\%$ more scene points with the advantage of a $57\\\\,\\\\%$ gain in the ATE RMSE.  \\n\\n<html><body><table><tr><td>Method</td><td>Tracking /Iteration</td><td>Mapping /Iteration</td><td>Tracking /Frame</td><td>Mapping /Frame</td><td>Decoder Size</td><td>Embedding Size</td></tr><tr><td>NICE-SLAM[75]</td><td>32ms</td><td>182 ms</td><td>1.32 s</td><td>10.92 s</td><td>0.47MB</td><td>95.86MB</td></tr><tr><td>Vox-Fusion[71]</td><td>12 ms</td><td>55 ms</td><td>0.36s</td><td>0.55s</td><td>1.04 MB</td><td>0.149MB</td></tr><tr><td>Point-SLAM [43]</td><td>21 ms</td><td>33 ms</td><td>0.85 s</td><td>9.85 s</td><td>0.51MB</td><td>27.23MB</td></tr><tr><td>ESLAM[26]</td><td>15 ms</td><td>29 ms</td><td>0.12 s</td><td>0.44 s</td><td>0.01MB</td><td>45.46MB</td></tr><tr><td>GO-SLAM[74]</td><td></td><td></td><td>0.125s</td><td></td><td>0.04MB</td><td>48.07MB</td></tr><tr><td>Ours</td><td>21 ms</td><td>33 ms</td><td>0.85 s</td><td>9.85 s</td><td>0.51MB</td><td>60.92MB</td></tr></table></body></html>  \\n\\nTable 5. Runtime and Memory Usage on Replica office 0 .The decoder size is the memory of all MLP networks. The embedding size is the total memory of the map representation. 
Memory usage is competitive. It can be noted that GO-SLAM needs an extra $15.28\\\\;\\\\mathrm{MB}$ for their tracker.   \\n\\n\\n<html><body><table><tr><td>Method</td><td>fr1 / fr1 / desk desk2</td><td></td><td>fr1 / fr2 / woo 1 xyz</td><td>fr3/ office</td><td>Avg.</td><td>Avg. ATE</td></tr><tr><td>Point-SLAM [43]</td><td>65K</td><td>102K</td><td>51K</td><td>288K 303K</td><td>162K</td><td>8.92 cm</td></tr><tr><td>Ours</td><td>93K</td><td>162K</td><td>49K</td><td>298K 316K</td><td>184K</td><td>3.85 cm</td></tr></table></body></html>\\n\\nTable 6. Number of Scene Points on TUM-RGBD [ 51 ]. Compared to Point-SLAM, Loopy-SLAM yields on average $14\\\\,\\\\%$ more points in the scene representation. This is a relatively small gain in footprint from the submap creation and loop closure strategy which bring a $57\\\\;\\\\%$ improvement in terms of the ATE RMSE.  \\n\\nLimitations. While our proposed method shows competitive performance in terms of tracking on real-world data, we believe that a more robust tracker can be built with a combination of frame-to-model and frame-to-frame queues. We also believe that more robust and faster registrations can be obtained by making use of not only 3D point features, but also image features from the associated keyframes. Place recognition can likely be improved with learned variants. Currently, our implementation is using Pytorch and Open3D via python bindings and not optimized for realtime operation. To improve runtime many parts of the method should benefit from a direct CUDA implementation instead. Finally, our system does not implement relocalization, which is an important part of a robust SLAM system. We leave these things for future work.  \\n\\n# 5. Conclusion  \\n\\nWe proposed Loopy-SLAM, a dense RGBD SLAM system which utilizes submaps of neural point clouds for local mapping and tracking and a pose graph for global pose and map optimization. 
The underlying point based representation allows for local map updates by shifting the points, contrary to re-integration strategies seen in previous works which requires that all mapped frames are stored during runtime. In comparison, our submap based integration strategy has the potential for better scalability. Our experiments show that Loopy-SLAM leverages the benefit of the neural point cloud representation and equips it with loop closure to demonstrate state-of-the art dense reconstruction performance as well as competitive tracking and rendering accuracy to existing methods.  \\n\\nAcknowledgements. This work was supported by a VIVO collaboration project on real-time scene reconstruction and research grants from FIFA. We thank Manthan Patel for fruitful discussions.  \\n\\n# References  \\n\\n[1] Dejan Azinovi´c, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural rgb-d surface reconstruction. In IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 6290–6301, 2022. 2   \\n[2] Wenjing Bian, Zirui Wang, Kejie Li, Jia-Wang Bian, and Victor Adrian Prisacariu. Nope-nerf: Optimising neural radiance field with no pose prior. arXiv preprint arXiv:2212.07388 , 2022. 2   \\n[3] Michael Bosse, Paul Newman, John Leonard, Martin Soika, Wendelin Feiten, and Seth Teller. An atlas framework for scalable mapping. In 2003 IEEE International Conference on Robotics and Automation (Cat. No. 03CH37422) , pages 1899–1906. IEEE, 2003. 2   \\n[4] Aljaˇz Boˇziˇc, Pablo Palafox, Justus Thies, Angela Dai, and Matthias Nießner. Transformerfusion: Monocular rgb scene reconstruction using transformers. arXiv preprint arXiv:2107.02191 , 2021. 2   \\n[5] E. Bylow, C. Olsson, and F. Kahl. Robust online 3d reconstruction combining a depth sensor and sparse feature points. In 2016 23rd International Conference on Pattern Recognition (ICPR) , pages 3709–3714, 2016. 2   \\n[6] Yan-Pei Cao, Leif Kobbelt, and Shi-Min Hu. 
Real-time highaccuracy three-dimensional reconstruction with consumer rgb-d cameras. ACM Transactions on Graphics (TOG) , 37 (5):1–16, 2018. 2 ,4 ,6   \\n[7] Jaesung Choe, Sunghoon Im, Francois Rameau, Minjun Kang, and In So Kweon. Volumefusion: Deep depth fusion for 3d scene reconstruction. In IEEE/CVF International Conference on Computer Vision (ICCV) , pages 16086–16095, 2021. 2   \\n[8] Sungjoon Choi, Qian-Yi Zhou, and Vladlen Koltun. Robust reconstruction of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition , pages 5556–5565, 2015. 2 ,3 ,4 ,5   \\n[9] Chi-Ming Chung, Yang-Che Tseng, Ya-Ching Hsu, XiangQian Shi, Yun-Hung Hua, Jia-Fong Yeh, Wen-Chin Chen, Yi-Ting Chen, and Winston H Hsu. Orbeez-slam: A realtime monocular visual slam with orb features and nerfrealized mapping. arXiv preprint arXiv:2209.13274 , 2022. 1 ,2   \\n[10] Brian Curless and Marc Levoy. Volumetric method for building complex models from range images. In SIGGRAPH Conference on Computer Graphics . ACM, 1996. 2 ,5   \\n[11] Angela Dai, Angel X. Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. ScanNet: Richly-annotated 3D reconstructions of indoor scenes. In Conference on Computer Vision and Pattern Recognition (CVPR) . IEEE/CVF, 2017. 5 ,6 ,7   \\n[12] Angela Dai, Matthias Nießner, Michael Zollh¨ofer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3d reconstruction using on-the-fly surface reintegration. ACM Transactions on Graphics (ToG) , 36(4):   \\n1, 2017. 1 ,2 ,3 ,6 [13] Felix Endres, J¨urgen Hess, Nikolas Engelhard, J¨urgen Sturm, Daniel Cremers, and Wolfram Burgard. An evaluation of the rgb-d slam system. In 2012 IEEE international conference on robotics and automation , pages 1691–1696. IEEE, 2012.   \\n2 [14] Jakob Engel, Thomas Sch¨ops, and Daniel Cremers. Lsdslam: Large-scale direct monocular slam. In European conference on computer vision , pages 834–849. Springer, 2014. 
  \\n2 [15] Nicola Fioraio, Jonathan Taylor, Andrew Fitzgibbon, Luigi Di Stefano, and Shahram Izadi. Large-scale and drift-free surface reconstruction using online subvolume registration. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition , pages 4475–4483, 2015. 2 [16] Peter Henry, Michael Krainin, Evan Herbst, Xiaofeng Ren, and Dieter Fox. Rgb-d mapping: Using kinect-style depth cameras for dense 3d modeling of indoor environments. The international journal of Robotics Research , 31(5):647–663,   \\n2012.2[17] Peter Henry, Dieter Fox, Achintya Bhowmik, and Rajiv Mongia. Patch volumes: Segmentation-based consistent mapping with rgb-d cameras. In 2013 International Conference on 3D Vision-3DV 2013 , pages 398–405. IEEE, 2013.   \\n2 [18] Armin Hornung, Kai M Wurm, Maren Bennewitz, Cyrill Stachniss, and Wolfram Burgard. Octomap: An efficient probabilistic 3d mapping framework based on octrees. Autonomous robots , 34:189–206, 2013. 2 [19] Jiahui Huang, Shi-Sheng Huang, Haoxuan Song, and ShiMin Hu. Di-fusion: Online implicit 3d reconstruction with deep priors. In IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 8932–8941, 2021. 2 ,6 [20] Olaf K¨ahler, Victor Adrian Prisacariu, Carl Yuheng Ren, Xin Sun, Philip H. S. Torr, and David William Murray. Very high frame rate volumetric integration of depth images on mobile devices. IEEE Trans. Vis. Comput. Graph. , 21(11):1241–   \\n1250, 2015.2[21] Olaf K¨ahler, Victor A Prisacariu, and David W Murray. Realtime large-scale dense 3d reconstruction with loop closure. In Computer Vision–ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part VIII 14 , pages 500–516. Springer, 2016. 2 [22] Christian Kerl, J¨urgen Sturm, and Daniel Cremers. Dense visual slam for rgb-d cameras. In 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems , pages   \\n2100–2106. IEEE, 2013. 
2 [23] Heng Li, Xiaodong Gu, Weihao Yuan, Luwei Yang, Zilong Dong, and Ping Tan. Dense rgb slam with neural implicit maps. arXiv preprint arXiv:2301.08930 , 2023. 2 [24] Chen Hsuan Lin, Wei Chiu Ma, Antonio Torralba, and Simon Lucey. BARF: Bundle-Adjusting Neural Radiance Fields. In International Conference on Computer Vision (ICCV) .IEEE/CVF, 2021. 2 [25] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3d surface construction algorithm. ACM siggraph computer graphics , 21(4):163–169, 1987. 5 ,6 [26] Mohammad Mahdi Johari, Camilla Carta, and Franc¸ois Fleuret. Eslam: Efficient dense slam system based on hybrid representation of signed distance fields. arXiv e-prints ,pages arXiv–2211, 2022. 1 ,2 ,6 ,7 ,8 [27] Robert Maier, J¨urgen Sturm, and Daniel Cremers. Submapbased bundle adjustment for 3d reconstruction from rgb-d data. In Pattern Recognition: 36th German Conference, GCPR 2014, M¨unster, Germany, September 2-5, 2014, Proceedings 36 , pages 54–65. Springer, 2014. 2 ,3 [28] R Maier, R Schaller, and D Cremers. Efficient online surface correction for real-time large-scale 3d reconstruction. arxiv   \\n2017. arXiv preprint arXiv:1709.03763 , 2017. 2 [29] Hidenobu Matsuki, Keisuke Tateno, Michael Niemeyer, and Federic Tombari. Newton: Neural view-centric mapping for on-the-fly large-scale slam. arXiv preprint arXiv:2303.13654 , 2023. 1 ,2 [30] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In European Conference on Computer Vision (ECCV) . CVF, 2020. 2 [31] Thomas M¨uller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG) , 41(4):1–15, 2022. 3 [32] Raul Mur-Artal and Juan D. Tardos. ORB-SLAM2: An Open-Source SLAM System for Monocular, Stereo, and RGB-D Cameras. 
IEEE Transactions on Robotics , 33(5):   \\n1255–1262, 2017. 2 ,4 ,6 [33] Zak Murez, Tarrence van As, James Bartolozzi, Ayan Sinha, Vijay Badrinarayanan, and Andrew Rabinovich. Atlas: Endto-end 3d scene reconstruction from posed images. In Computer Vision–ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part VII 16 ,pages 414–431. Springer, 2020. 2 [34] Richard A Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J Davison, Pushmeet Kohli, Jamie Shotton, Steve Hodges, and Andrew W Fitzgibbon. Kinectfusion: Real-time dense surface mapping and tracking. In ISMAR , pages 127–136, 2011. 1 ,2 [35] Matthias Nießner, Michael Zollh¨ofer, Shahram Izadi, and Marc Stamminger. Real-time 3d reconstruction at scale using voxel hashing. ACM Transactions on Graphics (TOG) ,  \\n32, 2013. 1 ,2 [36] Michael Oechsle, Songyou Peng, and Andreas Geiger. UNISURF: Unifying Neural Implicit Surfaces and Radiance Fields for Multi-View Reconstruction. In International Conference on Computer Vision (ICCV) . IEEE/CVF, 2021. 2 [37] Helen Oleynikova, Zachary Taylor, Marius Fehr, Roland Siegwart, and Juan I. Nieto. Voxblox: Incremental 3d euclidean signed distance fields for on-board MAV planning. In 2017 IEEE/RSJ International Conference on Intelligent Robots and Systems, IROS 2017, Vancouver, BC, Canada, September 24-28, 2017 , pages 1366–1373. IEEE, 2017. 2 [38] Joseph Ortiz, Alexander Clegg, Jing Dong, Edgar Sucar, David Novotny, Michael Zollhoefer, and Mustafa Mukadam. isdf: Real-time neural signed distance fields for robot perception. arXiv preprint arXiv:2204.02296 , 2022. 2   \\n[39] Victor Reijgwart, Alexander Millane, Helen Oleynikova, Roland Siegwart, Cesar Cadena, and Juan Nieto. Voxgraph: Globally consistent, volumetric mapping using signed distance function submaps. IEEE Robotics and Automation Letters , 5(1):227–234, 2019. 2   \\n[40] Antoni Rosinol, John J. Leonard, and Luca Carlone. 
NeRFSLAM: Real-Time Dense Monocular SLAM with Neural Radiance Fields. arXiv , 2022. 1 ,2   \\n[41] Radu Bogdan Rusu, Nico Blodow, and Michael Beetz. Fast point feature histograms (fpfh) for 3d registration. In 2009 IEEE international conference on robotics and automation ,pages 3212–3217. IEEE, 2009. 5   \\n[42] Rafael Mu˜noz Salinas. DBoW3 dbow3, 2017. 3 ,4   \\n[43] Erik Sandstr¨om, Yue Li, Luc Van Gool, and Martin R Oswald. Point-slam: Dense neural point cloud-based slam. In International Conference on Computer Vision (ICCV) .IEEE/CVF, 2023. 1 ,2 ,3 ,4 ,5 ,6 ,7 ,8   \\n[44] Erik Sandstr¨om, Kevin Ta, Luc Van Gool, and Martin R Oswald. Uncle-slam: Uncertainty learning for dense neural slam. In International Conference on Computer Vision Workshops (ICCVW) . IEEE/CVF, 2023. 1 ,2   \\n[45] Mohamed Sayed, John Gibson, Jamie Watson, Victor Prisacariu, Michael Firman, and Cl´ement Godard. Simplerecon: 3d reconstruction without 3d convolutions. In European Conference on Computer Vision , pages 1–19. Springer, 2022. 2   \\n[46] Thomas Schops, Torsten Sattler, and Marc Pollefeys. BAD SLAM: Bundle adjusted direct RGB-D SLAM. In CVF/IEEE Conference on Computer Vision and Pattern Recognition (CVPR) , 2019. 1 ,2 ,6   \\n[47] Frank Steinbrucker, Christian Kerl, and Daniel Cremers. Large-scale multi-resolution surface reconstruction from rgb-d sequences. In IEEE International Conference on Computer Vision , pages 3264–3271, 2013. 2   \\n[48] Noah Stier, Alexander Rich, Pradeep Sen, and Tobias H¨ollerer. Vortx: Volumetric 3d reconstruction with transformers for voxelwise view selection and fusion. In 2021 International Conference on 3D Vision (3DV) , pages 320–330. IEEE, 2021. 2   \\n[49] Julian Straub, Thomas Whelan, Lingni Ma, Yufan Chen, Erik Wijmans, Simon Green, Jakob J Engel, Raul Mur-Artal, Carl Ren, Shobhit Verma, et al. The replica dataset: A digital replica of indoor spaces. arXiv preprint arXiv:1906.05797 ,2019. 5 ,6 ,7   \\n[50] J¨org St¨uckler and Sven Behnke. 
Multi-resolution surfel maps for efficient dense 3d modeling and tracking. Journal of Visual Communication and Image Representation , 25(1):137– 147, 2014. 2 ,4   \\n[51] J¨urgen Sturm, Nikolas Engelhard, Felix Endres, Wolfram Burgard, and Daniel Cremers. A benchmark for the evaluation of RGB-D SLAM systems. In International Conference on Intelligent Robots and Systems (IROS) . IEEE/RSJ, 2012. 5 ,6 ,8   \\n[52] Edgar Sucar, Shikun Liu, Joseph Ortiz, and Andrew J. Davison. iMAP: Implicit Mapping and Positioning in Real-Time. In International Conference on Computer Vision (ICCV) .IEEE/CVF, 2021. 1 ,2 ,6   \\n[53] Jiaming Sun, Yiming Xie, Linghao Chen, Xiaowei Zhou, and Hujun Bao. Neuralrecon: Real-time coherent 3d reconstruction from monocular video. In IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 15598– 15607, 2021. 2   \\n[54] Matthew Tancik, Pratul Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. Advances in Neural Information Processing Systems, 33:7537–7547, 2020.3  \\n[55] Yijie Tang, Jiazhao Zhang, Zhinan Yu, He Wang, and Kai Xu. Mips-fusion: Multi-implicit-submaps for scalable and robust online neural rgb-d reconstruction. arXiv preprint arXiv:2308.08741 , 2023. 1 ,2 ,3 ,4 ,6   \\n[56] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Advances in neural information processing systems , 34:16558–16569, 2021. 2 ,3   \\n[57] Emanuele Vespa, Nikolay Nikolov, Marius Grimm, Luigi Nardi, Paul HJ Kelly, and Stefan Leutenegger. Efficient octree-based volumetric slam supporting signed-distance and occupancy mapping. IEEE Robotics and Automation Letters , 3(2):1144–1151, 2018. 2   \\n[58] Hao Wang, Jun Wang, and Wang Liang. Online reconstruction of indoor scenes from rgb-d streams. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition , pages 3271–3279, 2016. 2   \\n[59] Hengyi Wang, Jingwen Wang, and Lourdes Agapito. Coslam: Joint coordinate and sparse parametric encodings for neural real-time slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 13293–13302, 2023. 1 ,2 ,6 ,7   \\n[60] Jingwen Wang, Tymoteusz Bleja, and Lourdes Agapito. Gosurf: Neural feature grid optimization for fast, high-fidelity rgb-d surface reconstruction. In International Conference on 3D Vision , 2022. 2   \\n[61] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. NeuS: Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction. In Advances in Neural Information Processing Systems (NeurIPS) , 2021. 2   \\n[62] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing ,13(4):600–612, 2004. 6   \\n[63] Zirui Wang, Shangzhe Wu, Weidi Xie, Min Chen, and Victor Adrian Prisacariu. Nerf–: Neural radiance fields without known camera parameters. arXiv preprint arXiv:2102.07064 , 2021. 2   \\n[64] Silvan Weder, Johannes Schonberger, Marc Pollefeys, and Martin R Oswald. Routedfusion: Learning real-time depth map fusion. In IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 4887–4897, 2020. 2   \\n[65] Silvan Weder, Johannes L Schonberger, Marc Pollefeys, and Martin R Oswald. Neuralfusion: Online depth fusion in latent space. In IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 3162–3172, 2021. 2   \\n[66] Thibaut Weise, Thomas Wismer, Bastian Leibe, and Luc Van Gool. Online loop closure for real-time interactive 3d scanning. Computer Vision and Image Understanding , 115 (5):635–648, 2011.2  \\n[67] Thomas Whelan, John McDonald, Michael Kaess, Maurice Fallon, Hordur Johannsson, and John J. 
Leonard. Kintinuous: Spatially extended kinectfusion. In Proceedings of RSS ’12 Workshop on RGB-D: Advanced Reasoning with Depth Cameras, 2012.6  \\n[68] Thomas Whelan, Michael Kaess, Hordur Johannsson, Maurice Fallon, John J Leonard, and John McDonald. Real-time large-scale dense rgb-d slam with volumetric fusion. The International Journal of Robotics Research , 34(4-5):598–626, 2015.2  \\n[69] Thomas Whelan, Stefan Leutenegger, Renato Salas-Moreno, Ben Glocker, and Andrew Davison. Elasticfusion: Dense slam without a pose graph. In Robotics: Science and Systems (RSS) , 2015. 1 ,2 ,6   \\n[70] Zhixin Yan, Mao Ye, and Liu Ren. Dense visual slam with probabilistic surfel map. IEEE transactions on visualization and computer graphics , 23(11):2389–2398, 2017. 2 ,6   \\n[71] Xingrui Yang, Hai Li, Hongjia Zhai, Yuhang Ming, Yuqian Liu, and Guofeng Zhang. Vox-fusion: Dense tracking and mapping with voxel-based neural implicit representation. In IEEE International Symposium on Mixed and Augmented Reality (ISMAR) , pages 499–507. IEEE, 2022. 1 ,2 ,6 ,7 ,8   \\n[72] Xingrui Yang, Yuhang Ming, Zhaopeng Cui, and Andrew Calway. Fd-slam: 3-d reconstruction using features and dense matching. In 2022 International Conference on Robotics and Automation (ICRA) , pages 8040–8046. IEEE, 2022. 2   \\n[73] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In IEEE conference on computer vision and pattern recognition , pages 586–595, 2018. 6   \\n[74] Youmin Zhang, Fabio Tosi, Stefano Mattoccia, and Matteo Poggi. Go-slam: Global optimization for consistent 3d instant reconstruction. arXiv preprint arXiv:2309.02436 ,2023. 1 ,2 ,3 ,6 ,7 ,8   \\n[75] Zihan Zhu, Songyou Peng, Viktor Larsson, Weiwei Xu, Hujun Bao, Zhaopeng Cui, Martin R Oswald, and Marc Pollefeys. Nice-slam: Neural implicit scalable encoding for slam. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition , pages 12786–12796, 2022. 1 ,2 ,6 ,7 ,8   \\n[76] Zihan Zhu, Songyou Peng, Viktor Larsson, Zhaopeng Cui, Martin R Oswald, Andreas Geiger, and Marc Pollefeys. Nicer-slam: Neural implicit scene encoding for rgb slam. arXiv preprint arXiv:2302.03594 , 2023. 2  \"\n"
     ]
    }
   ],
   "source": [
    "# Retrieve the complete text of one paper via its exact title.\n",
    "query = {\n",
    "    \"title\": \"Loopy-SLAM: Dense Neural SLAM with Loop Closures\",\n",
    "}\n",
    "\n",
    "result = db.query_whole_text_by_title(query)\n",
    "print(result)\n",
    "# The service returns the whole paper text directly as one plain string."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 8 论文ID查找关键词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['3D Mapping', 'SLAM', 'Semi-Supervised Learning', 'Representation Learning']\n"
     ]
    }
   ],
   "source": [
    "# Look up the keyword list of a paper by its database ID.\n",
    "paper_query = {'paper_id': '65c19b5a939a5f40825fa482'}\n",
    "\n",
    "result = db.query_keywords_by_id(paper_query)\n",
    "ppr(result)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 9 论文标题查找关键词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['SLAM',\n",
      " 'dense',\n",
      " 'mapping',\n",
      " 'localization',\n",
      " 'tracking',\n",
      " 'loop closure',\n",
      " 'pose graph optimization']\n"
     ]
    }
   ],
   "source": [
    "# Look up the keyword list of a paper by its exact title.\n",
    "title_query = {'title': 'Loopy-SLAM: Dense Neural SLAM with Loop Closures'}\n",
    "\n",
    "result = db.query_keywords_by_title(title_query)\n",
    "ppr(result)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 10 搜索标题中包含特定关键词的论文元数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[['6434cfd690e50fcafd7a446e',\n",
      "  'Point-SLAM: Dense Neural Point Cloud-based SLAM'],\n",
      " ['65cec269939a5f40828f2c0c',\n",
      "  'Loopy-SLAM: Dense Neural SLAM with Loop Closures']]\n"
     ]
    }
   ],
   "source": [
    "# Fetch metadata of every paper whose title contains the given substring.\n",
    "contains_query = {\"title\": 'Dense Neural'}\n",
    "\n",
    "result = db.query_paper_metadata_that_title_contain(params=contains_query)\n",
    "ppr(result)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 11 查找与输入标题相似的标题列表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['Dense Network Expansion for Class Incremental Learning',\n",
      " 'Probabilistic Prompt Learning for Dense Prediction',\n",
      " 'Integral Neural Networks',\n",
      " 'Dense Text-to-Image Generation with Attention Modulation',\n",
      " 'Large-batch Optimization for Dense Visual Predictions',\n",
      " 'GAN-Supervised Dense Visual Alignment',\n",
      " 'Efficient Training with Denoised Neural Weights',\n",
      " 'Imbedding Deep Neural Networks',\n",
      " 'Federated Neuro-Symbolic Learning',\n",
      " 'Learning Dense Correspondence for NeRF-Based Face Reenactment',\n",
      " 'Streaming Dense Video Captioning',\n",
      " 'Structured Knowledge Distillation for Dense Prediction',\n",
      " 'Loopy-SLAM: Dense Neural SLAM with Loop Closures',\n",
      " 'Going Denser with Open-Vocabulary Part Segmentation',\n",
      " 'Diffusion Model for Dense Matching',\n",
      " 'Soft Prompt Decoding for Multilingual Dense Retrieval',\n",
      " 'Bispectral Neural Networks',\n",
      " 'Learning to Act from Actionless Videos Through Dense Correspondences',\n",
      " '3D Face Reconstruction with Dense Landmarks',\n",
      " 'Non-deep Networks',\n",
      " 'Dense Depth Priors for Neural Radiance Fields from Sparse Input Views',\n",
      " 'Active Neural Mapping',\n",
      " 'Fast Neural Scene Flow',\n",
      " 'Scaling Laws for Dense Retrieval',\n",
      " 'Dense Representation Learning and Retrieval for Tabular Data Prediction',\n",
      " 'Neural Lens Modeling',\n",
      " 'Generative Retrieval As Multi-Vector Dense Retrieval',\n",
      " 'TaskPrompter: Spatial-Channel Multi-Task Prompting for Dense Scene '\n",
      " 'Understanding',\n",
      " 'Dense Multimodal Alignment for Open-Vocabulary 3D Scene Understanding',\n",
      " 'Deep Stochastic Mechanics',\n",
      " 'Automatic Dense Annotation of Large-Vocabulary Sign Language Videos.',\n",
      " 'Deep Generative Symbolic Regression',\n",
      " 'DrS: Learning Reusable Dense Rewards for Multi-Stage Tasks',\n",
      " 'Disentangling Learning Representations with Density Estimation',\n",
      " 'Global Latent Neural Rendering',\n",
      " 'Unsupervised Dense Retrieval Training with Web Anchors',\n",
      " 'Improved Algorithms for Neural Active Learning',\n",
      " 'Neural Optimal Transport.',\n",
      " 'A Personalized Dense Retrieval Framework for Unified Information Access',\n",
      " 'Learning Visibility for Robust Dense Human Body Estimation',\n",
      " 'Efficient Multitask Dense Predictor Via Binarization',\n",
      " 'Dense Optical Tracking: Connecting the Dots',\n",
      " 'Typo-Robust Representation Learning for Dense Retrieval.',\n",
      " 'DENSE RGB SLAM WITH NEURAL IMPLICIT MAPS',\n",
      " 'Latent Neural ODEs with Sparse Bayesian Multiple Shooting',\n",
      " 'Deep Variational Implicit Processes',\n",
      " 'Neural Causal Abstractions',\n",
      " 'Hidden Schema Networks',\n",
      " 'Sparsity in Continuous-Depth Neural Networks',\n",
      " 'DEEP: DEnoising Entity Pre-training for Neural Machine Translation',\n",
      " 'Neural Contractive Dynamical Systems',\n",
      " 'Ambiguity-Resistant Semi-Supervised Learning for Dense Object Detection.',\n",
      " 'Stitchable Neural Networks.',\n",
      " 'Curriculum Learning for Dense Retrieval Distillation',\n",
      " 'Debiased Self-Training for Semi-Supervised Learning',\n",
      " 'Scalable Neural Network Kernels',\n",
      " 'CycleMLP: A MLP-like Architecture for Dense Prediction',\n",
      " 'Time Does Tell: Self-Supervised Time-Tuning of Dense Image Representations',\n",
      " 'Probabilistic Neural Circuits',\n",
      " 'The Contextual Lasso: Sparse Linear Models Via Deep Neural Networks',\n",
      " 'Vid2Seq: Large-Scale Pretraining of a Visual Language Model for Dense Video '\n",
      " 'Captioning',\n",
      " 'DenseDINO: Boosting Dense Self-Supervised Learning with Token-Based '\n",
      " 'Point-Level Consistency.',\n",
      " 'DejaVu: Conditional Regenerative Learning to Enhance Dense Prediction.',\n",
      " 'Sparse tree-based initialization for neural networks',\n",
      " 'Delayed Reinforcement Learning by Imitation',\n",
      " 'ConTextual Masked Auto-Encoder for Dense Passage Retrieval',\n",
      " 'Generative Semantic Segmentation',\n",
      " 'Prediction-Guided Distillation for Dense Object Detection',\n",
      " 'Fully Hyperbolic Neural Networks',\n",
      " 'Towards Understanding the Condensation of Neural Networks at Initial '\n",
      " 'Training',\n",
      " 'Spectrally Transformed Kernel Regression',\n",
      " 'Deep Graph Reprogramming',\n",
      " 'Latent Bottlenecked Attentive Neural Processes',\n",
      " 'Dense Retrieval with Continuous Explicit Feedback for Systematic Review '\n",
      " 'Screening Prioritisation',\n",
      " 'Treeformer: Dense Gradient Trees for Efficient Attention Computation.',\n",
      " 'Neural Structure Learning with Stochastic Differential Equations',\n",
      " 'Learning Disentangled Representations of Negation and Uncertainty',\n",
      " 'Learning A Sparse Transformer Network for Effective Image Deraining',\n",
      " 'Coarse-to-Fine Sparse Sequential Recommendation',\n",
      " 'Learning Multiple Dense Prediction Tasks from Partially Annotated Data',\n",
      " 'Exploring Set Similarity for Dense Self-supervised Representation Learning',\n",
      " 'MovieChat: from Dense Token to Sparse Memory for Long Video Understanding.',\n",
      " 'Deep Neural Network Initialization with Sparsity Inducing Activations',\n",
      " 'CrossKD: Cross-Head Knowledge Distillation for Dense Object Detection',\n",
      " 'SGS-SLAM: Semantic Gaussian Splatting for Neural Dense SLAM',\n",
      " 'Learning with Stochastic Orders',\n",
      " 'Point-SLAM: Dense Neural Point Cloud-based SLAM',\n",
      " 'Generative Image Dynamics',\n",
      " 'Super-Resolution Neural Operator.',\n",
      " 'Unbiased Supervised Contrastive Learning',\n",
      " 'Adversarial Retriever-Ranker for Dense Text Retrieval.',\n",
      " 'Forward Learning of Graph Neural Networks',\n",
      " 'A Dense Material Segmentation Dataset for Indoor and Outdoor Scene Parsing.',\n",
      " 'Masked Image Training for Generalizable Deep Image Denoising',\n",
      " 'A Bregman Learning Framework for Sparse Neural Networks',\n",
      " 'Modeling Sequential Sentence Relation to Improve Cross-lingual Dense '\n",
      " 'Retrieval',\n",
      " 'Neural Attentive Circuits',\n",
      " 'Towards Robust Dense Retrieval Via Local Ranking Alignment.',\n",
      " 'ClearCLIP: Decomposing CLIP Representations for Dense Vision-Language '\n",
      " 'Inference',\n",
      " 'A Dense Reward View on Aligning Text-to-Image Diffusion with Preference.']\n"
     ]
    }
   ],
   "source": [
    "# Look up knowledge-base entries whose titles match the substring below.\n",
    "params = {\"title\": 'Dense Neural'}\n",
    "result = db.titles_like(params=params)\n",
    "ppr(result)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
