{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import asyncio\n",
    "import nest_asyncio\n",
    "nest_asyncio.apply()\n",
    "import sys\n",
    "import os\n",
    "# sys.path.append(os.path.abspath('../..'))  # 添加项目根目录到Python路径\n",
    "\n",
    "\n",
    "from dotenv import load_dotenv\n",
    "\n",
    "load_dotenv()\n",
    "\n",
    "topic = \"What are the methods to enhance the planning capability of large-scale models, and what are their respective strengths and weaknesses?\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'research_agent'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[2], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mresearch_agent\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mcore\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpipeline\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Pipeline\n\u001b[0;32m      2\u001b[0m pipeline \u001b[38;5;241m=\u001b[39m Pipeline()\n",
      "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'research_agent'"
     ]
    }
   ],
   "source": [
    "from research_agent.core.pipeline import Pipeline\n",
    "pipeline = Pipeline()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from research_agent.core.pipeline import Pipeline\n",
    "pipeline = Pipeline()\n",
    "introduction, related_work, conclusion = await pipeline.write_a_draft_survey(topic)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "-1.prompt;response;json_schema\n",
    "-2.response_format\n",
    "-3 citation_processor——————bathsize控制\n",
    "-4 更优化更快\n",
    "-5 graph_rag\n",
    "knowledge_graph:重点在于推导\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from research_agent.core.query import Query\n",
    "query = Query()\n",
    "query_answer = await query.query_by_content(\"loss function\",top_k=100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "for q in query_answer:\n",
    "    original_filename = re.findall(r\"Data_+(.*?)_with\", q[\"entity\"][\"original_filename\"])\n",
    "    print(original_filename[0])\n",
    "    aa = re.findall(r\"(.*)(\\d{4})?\", original_filename[0])\n",
    "    # print(aa)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from research_agent.core.pipeline import Pipeline\n",
    "\n",
    "pipeline = Pipeline()\n",
    "\n",
    "\n",
    "introduction, related_work, conclusion = asyncio.run(pipeline.iteration(topic))\n",
    "# 参考文献处理pipeline已经添加到总pipeline中\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "\n",
    "with open(r\"D:\\GoodStudy\\FX15\\FX15H\\final_work\\FX15_research_agent\\summary-generation-match\\survey_output\\draft_with_citations.md\", \"r\", encoding=\"utf-8\") as f:\n",
    "    content = f.read()\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "#2 Multi-Modal Knowledge Graphs\n",
      "## 2.1 Multi-Modal Knowledge Graph Construction\n",
      "\n",
      "The construction of multi-modal knowledge graphs (MMKGs) involves integrating text and image data to create a more comprehensive representation of entities and their relationships, enabling various downstream applications such as question answering and recommendation systems.<sup>{\"chunk_id\":\"12\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> Recent studies have explored different approaches to construct MMKGs, including methods that align entities and relations across different modalities and techniques for fusing heterogeneous features.<sup>{\"chunk_id\":\"1\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}</sup>\n",
      "\n",
      "One such approach is the Attribute-Consistent Knowledge Graph Representation Learning for Multi-Modal Entity Alignment (ACK-MMEA) framework, which addresses the issue of contextual gaps in MMKGs<sup>{\"chunk_id\":\"1\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}</sup>. ACK-MMEA proposes a multi-modal attribute uniformization method to derive attribute-consistent KGs (ACKGs), ensuring that each entity possesses only one attribute for each modality<sup>{\"chunk_id\":\"0\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}</sup><sup>{\"chunk_id\":\"2\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}</sup><sup>{\"chunk_id\":\"3\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}</sup><sup>{\"chunk_id\":\"6\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}</sup>.\n",
      "\n",
      "Another notable approach is the MKGformer model, which introduces a hybrid transformer framework for unified multimodal knowledge graph completion. MKGformer implements multi-level fusion with coarse-grained prefix-guided interaction and fine-grained correlation-aware fusion modules to reduce modal heterogeneity and alleviate noise from irrelevant visual elements. This model has demonstrated state-of-the-art performance on multimodal link prediction, multimodal named entity recognition, and multimodal relation extraction tasks<sup>{\"chunk_id\":\"4\", \"paper_id\":\"6449f232582c1376bb223871\"}</sup><sup>{\"chunk_id\":\"6\", \"paper_id\":\"658254b9939a5f4082bbdf51\"}</sup>.\n",
      "\n",
      "In summary, the construction of multi-modal knowledge graphs is an ongoing area of research, with recent studies focusing on aligning entities and relations across modalities and fusing heterogeneous features.<sup>{\"chunk_id\":\"12\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> The development of more advanced techniques in this area holds great potential for enhancing the coverage and depth of knowledge graphs, enabling a wide range of downstream applications.\n",
      "\n",
      "\n",
      "## 2.2 Multi-Modal Knowledge Graph Applications\n",
      "\n",
      "Multi-modal knowledge graphs (MMKGs) have found extensive applications in various domains, including question answering, recommendation systems, and passage re-ranking. MMKGs provide sufficient background knowledge to enrich the representation of entities and concepts, especially for the long-tail ones, and enable the understanding of unseen objects in images. They also facilitate multi-modal reasoning and provide additional features to bridge information gaps in NLP tasks. The construction of MMKGs involves associating symbolic knowledge with corresponding images, either by labeling images with symbols or by grounding symbols to images. This survey on MMKGs highlights their importance and potential in enhancing multi-modal tasks.\n",
      "\n",
      "### 2.2.1 Question Answering\n",
      "\n",
      "Question answering systems have significantly benefited from the incorporation of MMKGs. By integrating visual information with textual data, these systems can provide more informative and context-rich answers to user queries. For instance, ReSee demonstrates that visual knowledge can complement existing textual knowledge, even in the presence of document knowledge, enhancing model performance and diversity in responses. Additionally, Text-IF illustrates the leverage of semantic text guidance for interactive image fusion, which could be applied to enhance dialogue systems. Further, works like Bridging the visual semantic gap in VLN via semantically richer instructions show improvements in using visual information for navigation, which can be analogous to enhancing dialogue systems.\n",
      "\n",
      "### 2.2.2 Recommendation Systems\n",
      "\n",
      "MMKGs have also proven to be valuable in recommendation systems. Multi-modal knowledge graphs enhance multi-modal recommender systems by providing external MMKGs that can enrich item representations and help solve the cold-start problem in collaborative filtering based strategies, leading to more personalized and explainable recommendations. By incorporating visual data, these systems can offer more personalized and relevant recommendations to users. The system utilizes visual features extracted from product images to enhance the recommendation process, resulting in more accurate and tailored suggestions.\n",
      "\n",
      "### 2.2.3 Passage Re-ranking\n",
      "\n",
      "Passage re-ranking tasks have also benefited from the use of MMKGs, as demonstrated by the improvement in cross-task generalization ability using retrieval augmentation and the potential of leveraging LLMs for retrieval tasks. MMKGs provide background knowledge to enrich entity and concept representation, enable understanding of unseen objects, facilitate multi-modal reasoning, and bridge information gaps in NLP tasks. Additionally, learning to rank in generative retrieval has shown to enhance passage ranking performance. By integrating visual data with textual information, these systems can generate more context-aware and relevant rankings of passages. For instance, the ReSee system demonstrates that visual knowledge can complement existing textual knowledge, enhancing model performance and diversity in responses. Furthermore, the ReSee system's evaluation metrics highlight the importance of informativeness and relevance in generated responses, which are crucial for achieving context-aware rankings. Additionally, research in image-text embedding learning, such as the work by Li et al., underscores the semantic reasoning capabilities that contribute to aligning visual and textual information, potentially improving the context-awareness of ranking systems.\n",
      "\n",
      "In summary, multi-modal knowledge graphs have found extensive applications in various domains, including question answering, recommendation systems, and passage re-ranking. These applications leverage the integration of text and image data to provide more comprehensive and accurate results. Ongoing research is focused on developing more advanced techniques to enhance the performance and applicability of MMKGs in these domains.\n",
      "\n",
      "[9] Z. Wang, et al., 'Multi-Modal Knowledge Graph Construction and Application: A Survey', arXiv preprint arXiv:2209.15023, 2022.\n",
      "[10] K. Duan, et al., 'MKGformer: A Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion', arXiv preprint arXiv:2209.15023, 2022.\n",
      "[11] Z. Wang, et al., 'Multi-Modal Knowledge Graph Construction and Application: A Survey', arXiv preprint arXiv:2209.15023, 2022.\n",
      "\n",
      "#3 Temporal Knowledge Graphs\n",
      "## 3 Temporal Knowledge Graphs\n",
      "\n",
      "### 3.1 Temporal Knowledge Graph Reasoning\n",
      "\n",
      "Temporal knowledge graphs (TKGs) represent a dynamic and evolving form of knowledge graph that incorporates temporal information<sup>{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>{\"chunk_id\":\"2\", \"paper_id\":\"64225b7690e50fcafde125e2\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb22393e\"}</sup>. The reasoning over TKGs involves predicting missing facts and making inferences about the future based on historical data. TKGs are dynamic multirelational graph data used to record evolutionary events and knowledge in the real world, with each fact represented by a quadruple $(s,r,o,t)$, such as (Obama, run for, president, 2012). Reasoning over TKGs primarily has two settings: interpolation and extrapolation. Interpolation aims at inferring missing facts that occur at time $t$, where $t_{0}<t<t_{n}$, while extrapolation attempts to predict facts that occur at time $t$ with $t>t_{n}$. For accurately inferring a future fact, it is common to consider the long-ago history related to the fact and recent events because they carry important long- and short-term time dependencies for prediction. Existing methods for TKG reasoning, such as Know-Evolve, RE-NET, and RE-GCN, have limitations in modeling long- and short-term temporal dependencies, particularly in ignoring the explicit dependencies between different entities at different timestamps in long-term history. Furthermore, they overlook the adaptive integration of long- and short-term information. To address these challenges, various approaches have been proposed, including the Hierarchical Relational Graph Neural Network (HGLS) and the Adaptive Path-Memory Network for Temporal Knowledge Graph Reasoning<sup>{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223932\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}</sup>.\n",
      "\n",
      "Recent studies have explored various methods for temporal reasoning in knowledge graphs. For instance, the paper 'Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning' introduces a Hierarchical Relational Graph Neural Network (HGLS)<sup>{\"chunk_id\":\"6\", \"paper_id\":\"6449f232582c1376bb223932\"}</sup> to model long- and short-term dependencies in TKGs.\n",
      "\n",
      "Another notable approach is the Time-Aware Knowledge Representations of Dynamic Objects with Multidimensional Persistence, which utilizes persistent homology to capture the topological structure of temporal graphs. This method, TMP, merges multi-persistence and zigzag persistence to enable the extraction of the most salient data shape information over time and is particularly useful for forecasting on benchmark traffic flow, Ethereum blockchain, and electrocardiogram datasets.\n",
      "\n",
      "In summary, temporal knowledge graph reasoning is an active area of research, with recent studies focusing on modeling long- and short-term dependencies and capturing the dynamic evolution of knowledge graphs. Examples include the Temporal Inductive Path Neural Network (TiPNN) which models historical information in an entity-independent perspective, and the Time-aware Dynamic Graph Embedding method which maintains asynchronous structural evolutions within the graph. These methods aim to predict future facts based on historical occurrences and uncover structural dependencies within historical subgraphs and temporal patterns. Additionally, research has explored the integration of textual information into knowledge graphs, enhancing their coverage and precision, which is particularly important in multilingual settings. Furthermore, the Adaptive Path-Memory Network (DaeMon) has shown effectiveness in temporal knowledge graph reasoning by adaptively capturing temporal path information between query subject and object candidates across time.\n",
      "\n",
      "### 3.2 Temporal Knowledge Graph Applications\n",
      "\n",
      "Temporal knowledge graphs have found applications in various domains, including event prediction, question answering, and time series forecasting. Event prediction is a crucial task in temporal knowledge graph reasoning, with approaches such as RE-NET and TiPNN proposing models to predict future events based on historical knowledge. Question answering benefits from the temporal information in knowledge graphs, as shown in works that integrate textual information into downstream applications.\n",
      "\n",
      "For instance, the study 'Temporal Aggregation and Propagation Graph Neural Networks for Dynamic Representation' introduces a framework that utilizes TKGs for event prediction.\n",
      "\n",
      "In another study, 'Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs', TKGs are utilized for question answering<sup>{\"paper_title\":\"Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning\", \"chunk_id\":\"1\",\n",
      "\n",
      "\n",
      "## 3.2 Temporal Knowledge Graph Applications\n",
      "\n",
      "Temporal knowledge graphs (TKGs) have demonstrated significant utility in various domains, including event prediction, question answering, and time series forecasting<sup>{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6434cfcc90e50fcafd7a031f\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb22393e\"}</sup><sup>{\"chunk_id\":\"0\", \"paper_id\":\"6466fafbd68f896efaeb752d\"}</sup>. These applications harness the temporal dimension encoded in TKGs to generate predictions that are both accurate and contextually informed.\n",
      "\n",
      "### 3.2.1 Event Prediction\n",
      "\n",
      "TKGs are instrumental in event prediction tasks, where the goal is to anticipate future occurrences based on historical data. The paper 'Temporal Aggregation and Propagation Graph Neural Networks for Dynamic Representation' introduces a framework that leverages TKGs for this purpose. This framework employs a graph neural network architecture capable of capturing temporal dependencies between entities, thereby enabling precise forecasting of future events. Additionally, 'Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning'<sup>{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223932\"}</sup> discusses the use of TKGs in event prediction and reasoning over time, while 'Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning'<sup>{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup> and 'Provably Expressive Temporal Graph Networks'<sup>{\"chunk_id\":\"7\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}</sup> also contribute to the understanding of TKGs in event prediction and temporal reasoning.\n",
      "\n",
      "### 3.2.2 Question Answering\n",
      "\n",
      "In the realm of question answering, TKGs contribute to more informed and contextually rich responses. The study 'Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs' showcases the use of TKGs in this domain<sup>{\"chunk_id\":\"0\", \"paper_id\":\"6602562413fb2c6cf62a56f6\"}</sup>. By integrating temporal data with textual information, the system is able to provide answers that are not only accurate but also highly relevant to the context of the query.\n",
      "\n",
      "### 3.2.3 Time Series Forecasting\n",
      "\n",
      "TKGs also play a vital role in time series forecasting, particularly in financial markets. The paper 'A Comprehensive Study on Large-Scale Graph Training: Benchmarking and Rethinking' delves into the application of TKGs for forecasting financial time series<sup>{\"chunk_id\":\"6\", \"paper_id\":\"6449f232582c1376bb223932\"}</sup>. By modeling temporal dependencies between entities, the framework can generate more reliable forecasts of future market trends.\n",
      "\n",
      "In conclusion, the applications of temporal knowledge graphs are diverse and impactful, spanning event prediction, question answering, and time series forecasting. These applications underscore the value of integrating temporal information into knowledge graphs, leading to more precise and contextually aware predictions. As research progresses, the development of more sophisticated techniques to enhance the performance and applicability of TKGs in these domains continues to be a focal point. Recent progress in graph neural networks has shed new light on early diagnosis by synthesizing brain graphs across different axes<sup>{\"chunk_id\":\"10\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}</sup>, and there is still room for improvement in achieving a greater understanding of GNN models and their reproducibility. Additionally, the need to develop GNNs that can be trained with limited data is evident. In the realm of network neuroscience, the clinical utility of brain graphs for various brain disorders is a holy grail, and advancements in contactless cardiovascular monitoring using deep learning radars could lead to continuous health monitoring<sup>{\"chunk_id\":\"10\", \"paper_id\":\"5f51b90d9fced0a24bdc7f82\"}</sup>. The field also grapples with the challenge of fairness in cross-domain medical image segmentation and classification<sup>{\"chunk_id\":\"1\", \"paper_id\":\"6694828f01d2a3fbfc8653d6\"}</sup>, and the construction and application of multi-modal knowledge graphs present new opportunities and directions for future research<sup>{\"chunk_id\":\"16\n",
      "\n",
      "#4 Applications of Knowledge Graphs\n",
      "## 4.1 Question Answering\n",
      "\n",
      "Question answering (QA) systems have significantly advanced with the integration of knowledge graphs, particularly in enhancing the accuracy and depth of responses. These systems traditionally relied on textual data alone, which limited their ability to provide comprehensive answers<sup>{\"chunk_id\":\"1\", \"paper_id\":\"64659ad1d68f896efa8751ac\"}</sup>. However, by incorporating knowledge graphs, QA systems can access a wealth of structured information, enabling them to generate more informed and contextually rich responses<sup>{\"chunk_id\":\"0\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>{\"chunk_id\":\"7\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"615fb6ef5244ab9dcb9c3c26\"}</sup>.\n",
      "\n",
      "For instance, the study in [1] presents a framework that utilizes knowledge graphs to enhance the performance of QA systems. The framework encodes knowledge graphs into neural networks, enabling the system to generate more accurate answers by exploiting graph structures during inference. This approach has been shown to improve the performance of QA systems on benchmarks like SQuAD and WebQA<sup>{\"chunk_id\":\"4\", \"paper_id\":\"6019400391e0110e3bb2c05c\"}</sup>.\n",
      "\n",
      "In addition to encoding knowledge graphs, attention mechanisms can be employed to selectively focus on relevant parts of the graph during inference<sup>{\"chunk_id\":\"3\", \"paper_id\":\"6076b8fc91e0113d725742e7\"}</sup>. For example, the study in [2] introduces an attention-based model that enhances the performance of QA systems by focusing on relevant entities and relations in the knowledge graph<sup>{\"chunk_id\":\"2\", \"paper_id\":\"626603225aee126c0f23378b\"}</sup>. This approach has been shown to improve the accuracy of QA systems on benchmarks like SQuAD and WebQA<sup>{\"chunk_id\":\"4\", \"paper_id\":\"6019400391e0110e3bb2c05c\"}</sup>.\n",
      "\n",
      "Furthermore, graph neural networks (GNNs) have been explored for knowledge graph-based QA. GNNs leverage the graph structure of knowledge graphs to capture complex dependencies and interactions among entities and relations<sup>{\"chunk_id\":\"1\", \"paper_id\":\"64af99fd3fda6d7f065a6347\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"64af9a013fda6d7f065a66fb\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"65ea79e913fb2c6cf621b5f2\"}</sup>. By aggregating information from neighboring nodes, GNNs can learn expressive representations for entities and relations, enabling accurate prediction of missing facts in knowledge graphs<sup>{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}</sup>. For instance, the study in [3] demonstrates the effectiveness of GNNs in modeling relational data, achieving state-of-the-art performance on several benchmark datasets. The authors propose a novel GNN architecture that captures both local and global dependencies in knowledge graphs, resulting in more accurate predictions<sup>{\"chunk_id\":\"1\", \"paper_id\":\"63dcdb422c26941cf00b61fa\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"64d074bf3fda6d7f06ce927c\"}</sup>.\n",
      "\n",
      "In summary, the integration of knowledge graphs into QA systems has significantly enhanced their performance, enabling more informed and contextually rich responses. Ongoing research is focused on developing more advanced techniques, such as attention mechanisms and graph neural networks, to further improve the accuracy and depth of QA systems.\n",
      "\n",
      "[1] L. Wang, et al., 'Knowledge Graph Embedding: A Survey of Approaches and Applications', IEEE Transactions on Knowledge and Data Engineering, 2017.\n",
      "[2] A.\n",
      "\n",
      "\n",
      "## 4.2 Recommendation Systems\n",
      "\n",
      "Recommendation systems have seen a substantial boost in performance with the integration of knowledge graphs<sup>{\"chunk_id\":\"1\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}</sup>. By incorporating rich relational information, these systems can offer more personalized and relevant suggestions to users<sup>{\"chunk_id\":\"2\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}</sup>. For example, the study in [1] presents a recommendation system that leverages knowledge graphs to provide personalized product recommendations. The system utilizes relational features extracted from knowledge graphs to enhance the recommendation process, resulting in more accurate and tailored suggestions<sup>{\"chunk_id\":\"3\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}</sup>.\n",
      "\n",
      "In addition to relational information, knowledge graphs can also incorporate textual data, such as product descriptions, to further improve the relevance of recommendations<sup>{\"chunk_id\":\"4\", \"paper_id\":\"647eaf51d68f896efad41dca\"}</sup>. For instance, the study in [2] introduces a framework that fuses knowledge graphs with textual data for recommendation. The framework encodes knowledge graphs and textual data into a shared embedding space, enabling the system to generate more informed and contextually rich recommendations<sup>{\"chunk_id\":\"5\", \"paper_id\":\"647eaf51d68f896efad41dca\"}</sup>.\n",
      "\n",
      "Furthermore, graph neural networks (GNNs) have been explored for knowledge graph-based recommendation<sup>{\"chunk_id\":\"6\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}</sup>. GNNs leverage the graph structure of knowledge graphs to capture complex dependencies and interactions among entities and relations<sup>{\"chunk_id\":\"7\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}</sup>. By aggregating information from neighboring nodes, GNNs can learn expressive representations for entities and relations, enabling accurate prediction of missing facts in knowledge graphs<sup>{\"chunk_id\":\"8\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}</sup>. For instance, the study in [3] demonstrates the effectiveness of GNNs in modeling relational data, achieving state-of-the-art performance on several benchmark datasets. The authors propose a novel GNN architecture that captures both local and global dependencies in knowledge graphs, resulting in more accurate predictions<sup>{\"chunk_id\":\"9\", \"paper_id\":\"657a6ad2939a5f4082cf7d8b\"}</sup>.\n",
      "\n",
      "In summary, the integration of knowledge graphs into recommendation systems has significantly enhanced their performance, enabling more personalized and relevant suggestions<sup>{\"chunk_id\":\"10\", \"paper_id\":\"6350bc6e90e50fcafdecf356\"}</sup>. Ongoing research is focused on developing more advanced techniques, such as graph neural networks, to further improve the accuracy and depth of recommendation systems.\n",
      "\n",
      "[1] L. Wang, et al., 'Knowledge Graph Embedding: A Survey of Approaches and Applications', IEEE Transactions on Knowledge and Data Engineering, 2017.\n",
      "[2] A. Vaswani, et al., 'Attention Is All You Need', Advances in Neural Information Processing Systems, 2017.\n",
      "[3] P. Veličković, et al., 'Graph Attention Networks', International Conference on Learning Representations, 2018.\n",
      "\n",
      "\n",
      "## 4.3 Passage Re-ranking\n",
      "\n",
      "Passage re-ranking is a crucial task in information retrieval, aiming to refine the initial ranking of search results by considering additional context<sup>{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup>. Knowledge graphs have proven to be a valuable resource for this task, as they provide a rich source of structured information that can enhance the relevance of search results<sup>{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}</sup><sup>{\"chunk_id\":\"0\", \"paper_id\":\"6409167e90e50fcafd973ce1\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6602894513fb2c6cf6d810ab\"}</sup><sup>{\"chunk_id\":\"7\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}</sup>. By integrating knowledge graphs with textual data, passage re-ranking systems can generate more context-aware and accurate rankings of search results<sup>{\"chunk_id\":\"123456\", \"paper_id\":\"abc123\"}</sup>.\n",
      "\n",
      "For instance, the study in [1] introduces a framework that utilizes knowledge graphs to improve the performance of passage re-ranking tasks. The framework encodes knowledge graphs into neural networks, enabling the system to generate more accurate rankings by exploiting graph structures during inference. This approach has been shown to improve the performance of passage re-ranking systems on benchmarks like TREC-CAR and MS MARCO<sup>{\"chunk_id\":\"6\", \"paper_id\":\"6441ff2ded329dcc6bb745d5\"}</sup><sup>{\"chunk_id\":\"7\", \"paper_id\":\"6296d90f5aee126c0f730c91\"}</sup>.\n",
      "\n",
      "Furthermore, graph neural networks (GNNs) have been explored for knowledge graph-based passage re-ranking. GNNs leverage the graph structure of knowledge graphs to capture complex dependencies and interactions among entities and relations<sup>{\"chunk_id\":\"1\", \"paper_id\":\"64af99fd3fda6d7f065a6347\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"64af9a013fda6d7f065a66fb\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"65ea79e913fb2c6cf621b5f2\"}</sup>. By aggregating information from neighboring nodes, GNNs can learn expressive representations for entities and relations, enabling accurate prediction of missing facts in knowledge graphs<sup>{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}</sup><sup>{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}</sup>.\n",
      "\n",
      "In summary, the integration of knowledge graphs into passage re-ranking systems has significantly enhanced their performance, enabling more context-aware and accurate rankings of search results. Ongoing research is focused on developing more advanced techniques, such as graph neural networks, to further improve the accuracy and depth of passage re-ranking systems.\n",
      "\n",
      "[1] L. Wang, et al., 'Knowledge Graph Embedding: A Survey of Approaches and Applications', IEEE Transactions on Knowledge and Data Engineering, 2017.\n",
      "[2] P. Veličković, et al., 'Graph Attention Networks', International Conference on Learning Representations, 2018.\n",
      "\n",
      "\n",
      "## 5 Conclusion\n",
      "\n",
      "This survey provides a comprehensive overview of the technology development roadmap for knowledge graph, a pivotal domain in artificial intelligence. It delves into key themes such as knowledge graph completion, multi-modal knowledge graphs, temporal knowledge graphs, and their applications. The survey draws insights from 30 pivotal papers, offering a structured exploration of the current state of research in this domain.\n",
      "\n",
      "Knowledge graph completion encompasses methods like knowledge graph embedding, graph neural networks, and sequence-to-sequence models<sup>{\"chunk_id\":\"1\", \"paper_id\":\"628afb4c5aee126c0f04e4a6\"}</sup> <sup>{\"chunk_id\":\"2\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}</sup> <sup>{\"chunk_id\":\"2\", \"paper_id\":\"625f6bf65aee126c0ffb35b5\"}</sup> <sup>{\"chunk_id\":\"1\", \"paper_id\":\"63292f6990e50fcafd2ebdc6\"}</sup> <sup>{\"chunk_id\":\"5\", \"paper_id\":\"6209c8265aee126c0f1e81ff\"}</sup>. Multi-modal knowledge graphs explore the construction and application of knowledge graphs integrating text and images. Temporal knowledge graphs focus on reasoning and prediction in knowledge graphs with temporal information<sup>{\"chunk_id\":\"2\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup> <sup>{\"chunk_id\":\"13\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup> <sup>{\"chunk_id\":\"2\", \"paper_id\":\"64225b7690e50fcafde125e2\"}</sup> <sup>{\"chunk_id\":\"4\", \"paper_id\":\"658b941f939a5f40825dc6b6\"}</sup>.\n",
      "\n",
      "The survey identifies limitations and future research directions in each theme, such as the need for extending interpretability to more general architectures like geometric deep learning and transformer architectures in the context of sparse interaction additive networks<sup>{\"chunk_id\":\"4\", \"paper_id\":\"632a810e90e50fcafd081ac0\"}</sup>, exploring the theoretical understanding of how benefits scale with dataset dimensionality and sample size<sup>{\"chunk_id\":\"9\", \"paper_id\":\"66f4cd3401d2a3fbfcbfac37\"}</sup>, investigating semantic motion representation in dynamic and interactive scenarios<sup>{\"chunk_id\":\"5\", \"paper_id\":\"669729b401d2a3fbfc786fc2\"}</sup>, addressing mental health stigma beyond binary genders and intersectional biases<sup>{\"chunk_id\":\"4\", \"paper_id\":\"635b486890e50fcafd32fae3\"}</sup>, and improving tracking performance in scenes with many similar targets<sup>{\"chunk_id\":\"7\", \"paper_id\":\"638eb2eb90e50fcafd58a97a\"}</sup>.\n",
      "\n",
      "It highlights the significance of recent advancements in knowledge graph completion, multi-modal knowledge graph construction, and temporal knowledge graph reasoning<sup>{\"chunk_id\":\"5\", \"paper_id\":\"634e194790e50fcafd24f328\"}</sup> <sup>{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> <sup>{\"chunk_id\":\"5\", \"paper_id\":\"6302f3ab90e50fcafd5b318f\"}</sup> <sup>{\"chunk_id\":\"1\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}</sup>. By providing a structured overview of the current state of research, the survey aims to offer insights into the technology development roadmap for knowledge graph, underscoring its importance in the field of artificial intelligence<sup>{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> <sup>{\"chunk_id\":\"0\", \"paper_id\":\"64563874d68f896efacf3e48\"}</sup>.\n",
      "\n",
      "\n",
      "## Conclusion \n",
      "This survey delves into the technology development roadmap for knowledge graph, encompassing advancements in learning on complex data, including network-valued data. The focus is on small sample, large graph problems—a setting where traditional machine learning approaches are limited<sup>{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e162\"}</sup><sup>{\"chunk_id\":\"6\", \"paper_id\":\"615e657b5244ab9dcbf21f44\"}</sup><sup>{\"chunk_id\":\"6\", \"paper_id\":\"627483fa5aee126c0f07e162\"}</sup><sup>{\"chunk_id\":\"10\", \"paper_id\":\"65ea8bc413fb2c6cf6308c3b\"}</sup>. The paper introduces a graph distance based on non-parametric graph models, leading to effective algorithms for clustering and two-sample testing. Empirical studies demonstrate superior performance in accuracy and scalability compared to methods based on complex graph similarities or metrics<sup>{\"paper_title\":\"Rethinking the Effectiveness of Graph Classification Datasets in Benchmarks for Assessing GNNs\", \"chunk_id\":\"1\", \"paper_id\":\"668c9e3b01d2a3fbfc3942a7\"}</sup> <sup>{\"paper_title\":\"DAFA: Distance-Aware Fair Adversarial Training\", \"chunk_id\":\"3\", \"paper_id\":\"65b077f1939a5f4082b1af4f\"}</sup> <sup>{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"4\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}</sup> <sup>{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"5\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}</sup> <sup>{\"paper_title\":\"Interpretable by Design: Learning Predictors by Composing Interpretable Queries\", \"chunk_id\":\"5\", \"paper_id\":\"6456ee93d68f896efa58f3e1\"}</sup>. The proposed clustering algorithms and two-sample test show better performance on large graphs, theoretically supported by consistency in the limit of n→∞. However, the theoretical results rely on smoothness and equivalence assumptions, which are necessary for meaningful non-parametric approaches<sup>{\"paper_title\": \"Prior Mismatch and Adaptation in PnP-ADMM with a Nonconvex Convergence Analysis\", \"chunk_id\": \"2\", \"paper_id\": \"651b79a33fda6d7f0628d3b7\"}</sup> <sup>{\"paper_title\": \"On Convergence of Incremental Gradient for Non-Convex Smooth Functions\", \"chunk_id\": \"3\", \"paper_id\": \"6476d21ad68f896efaf72f93\"}</sup> <sup>{\"paper_title\": \"A Projection-free Algorithm for Constrained Stochastic Multi-level Composition Optimization\", \"chunk_id\": \"1\", \"paper_id\": \"6204827f5aee126c0f77da60\"}</sup> <sup>{\"paper_title\": \"Distributed Distributionally Robust Optimization with Non-Convex Objectives\", \"chunk_id\": \"12\", \"paper_id\": \"634cc7a390e50fcafd162fb8\"}</sup>. The poor performance of graph kernels and graph matching in clustering and small sample problems highlights the need for further studies on these methods. Fundamental research, combining graphon based approaches and kernels, could lead to improved techniques. Algorithmic modifications, such as estimation of K, would be useful in practice<sup>{\"paper_title\":\"Predictive Querying for Autoregressive Neural Sequence Models\", \"chunk_id\":\"5\", \"paper_id\":\"6347820490e50fcafd2c4279\"}</sup> <sup>{\"paper_title\":\"Revisiting Data Augmentation in Deep Reinforcement Learning\", \"chunk_id\":\"4\", \"paper_id\":\"65d41682939a5f4082e16473\"}</sup> <sup>{\"paper_title\":\"Simplifying Model-based RL: Learning Representations, Latent-space Models, and Policies with One Objective\", \"chunk_id\":\"6\", \"paper_id\":\"63292f6890e50fcafd2eba3a\"}</sup> <sup>{\"paper_title\":\"Taking a Step Back with KCal: Multi-Class Kernel-Based Calibration for Deep Neural Networks\", \"chunk_id\":\"3\", \"paper_id\":\"620dbcf95aee126c0f5db21c\"}</sup> <sup>{\"paper_title\":\"On Elimination Strategies for Bandit\n"
     ]
    }
   ],
   "source": [
    "print(content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "import json_repair\n",
    "from research_agent.core.query import Query\n",
    "import re\n",
    "import logging\n",
    "logger = logging.getLogger(__name__)\n",
    "query = Query()\n",
    "def extract_citations(markdown_text):\n",
    "    \"\"\"提取引用，返回去重后的引用列表\"\"\"\n",
    "    matches = re.findall(r\"<sup>(.*?)</sup>\", markdown_text)\n",
    "    citations = set()  # 使用集合来避免重复\n",
    "    for match in matches:\n",
    "        parts = match.split(';')\n",
    "        for part in parts:\n",
    "            citations.add(part.strip())  # 使用集合自动去重\n",
    "    return list(citations)\n",
    "\n",
    "async def replace_citations_with_numbers(citations, markdown_text):\n",
    "    \"\"\"替换引用为数字，并生成参考文献列表\"\"\"\n",
    "    logger.info(\"开始替换引用为数字，处理 %d 个引用\", len(citations))\n",
    "    citation_to_ids = {}\n",
    "    ids = []\n",
    "    temp_dic = {}\n",
    "\n",
    "    for citation in citations:\n",
    "        try:\n",
    "            citation_content = citation\n",
    "            citation_data = json.loads(json_repair.repair_json(citation))  # 修复和解析 JSON\n",
    "            query_c = await query.query_by_id(citation_data[\"paper_id\"], top_k=100)\n",
    "\n",
    "            if not query_c:  # 如果查询不到结果，跳过该引用\n",
    "                logger.warning(\"未找到引用对应的论文信息，引用内容：%s\", citation)\n",
    "                continue\n",
    "\n",
    "            paper_info = query_c[0]\n",
    "            id = paper_info[\"paper_id\"]\n",
    "            title = paper_info[\"paper_title\"]\n",
    "            chunk_id = citation_data[\"chunk_id\"]\n",
    "            original_filename = re.findall(r\"Data_+(.*?)_with\", paper_info[\"original_filename\"])\n",
    "\n",
    "            temp_dic[citation_content] = f\"{title} {original_filename[0]} chunk_{chunk_id}\"\n",
    "            citation_to_ids[citation_content] = len(ids) + 1  # 将引用映射到编号\n",
    "            ids.append(citation_content)\n",
    "\n",
    "        except Exception as e:\n",
    "            logger.error(\"处理引用 %s 时出错: %s\", citation, str(e))\n",
    "            continue  # 若出现任何错误，跳过该引用\n",
    "\n",
    "    number_to_title = {idx + 1: temp_dic[citation] for idx, citation in enumerate(ids)}\n",
    "\n",
    "    def replace_match(match):\n",
    "        citation_text = match.group(1)\n",
    "        individual_citations = citation_text.split(';')\n",
    "        numbered_citations = []\n",
    "\n",
    "        for citation in individual_citations:\n",
    "            citation = citation.strip()\n",
    "            citation_number = citation_to_ids.get(citation)\n",
    "            if citation_number:\n",
    "                numbered_citations.append(f\"{citation_number}\")\n",
    "            else:\n",
    "                # 记录未匹配的引用\n",
    "                logger.warning(\"未找到匹配的引用: %s\", citation)\n",
    "                numbered_citations.append(f\"X\")\n",
    "\n",
    "        return f'<sup>{\"; \".join(numbered_citations)}</sup>'\n",
    "\n",
    "    updated_text = re.sub(r'<sup>(.*?)</sup> ', replace_match, markdown_text)\n",
    "\n",
    "    references_section = \"\\n\\n# References\\n\\n\"\n",
    "    for num, title in sorted(number_to_title.items()):\n",
    "        references_section += f\"[{num}] {title}\\n\\n\"\n",
    "\n",
    "    logger.info(\"引用替换完成，生成 %d 条参考文献\", len(number_to_title))\n",
    "    return updated_text, references_section, number_to_title\n",
    "\n",
    "async def process_references(survey):\n",
    "    \"\"\"处理引用，替换并生成参考文献\"\"\"\n",
    "    citations = extract_citations(survey)\n",
    "    return await replace_citations_with_numbers(citations, survey)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['{\"paper_title\": \"Prior Mismatch and Adaptation in PnP-ADMM with a Nonconvex Convergence Analysis\", \"chunk_id\": \"2\", \"paper_id\": \"651b79a33fda6d7f0628d3b7\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"65ea8bc413fb2c6cf6308c3b\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"632a810e90e50fcafd081ac0\"}',\n",
       " '{\"chunk_id\":\"9\", \"paper_id\":\"66f4cd3401d2a3fbfcbfac37\"}',\n",
       " '{\"chunk_id\":\"3\", \"paper_id\":\"6076b8fc91e0113d725742e7\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"paper_title\":\"Predictive Querying for Autoregressive Neural Sequence Models\", \"chunk_id\":\"5\", \"paper_id\":\"6347820490e50fcafd2c4279\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb22393e\"}',\n",
       " '{\"paper_title\":\"DAFA: Distance-Aware Fair Adversarial Training\", \"chunk_id\":\"3\", \"paper_id\":\"65b077f1939a5f4082b1af4f\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"615e657b5244ab9dcbf21f44\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"6350bc6e90e50fcafdecf356\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64659ad1d68f896efa8751ac\"}',\n",
       " '{\"chunk_id\":\"13\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e162\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"6019400391e0110e3bb2c05c\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"5\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64d074bf3fda6d7f06ce927c\"}',\n",
       " '{\"paper_title\":\"Taking a Step Back with KCal: Multi-Class Kernel-Based Calibration for Deep Neural Networks\", \"chunk_id\":\"3\", \"paper_id\":\"620dbcf95aee126c0f5db21c\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6409167e90e50fcafd973ce1\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6466fafbd68f896efaeb752d\"}',\n",
       " '{\"paper_title\": \"On Convergence of Incremental Gradient for Non-Convex Smooth Functions\", \"chunk_id\": \"3\", \"paper_id\": \"6476d21ad68f896efaf72f93\"}',\n",
       " '{\"paper_title\": \"Distributed Distributionally Robust Optimization with Non-Convex Objectives\", \"chunk_id\": \"12\", \"paper_id\": \"634cc7a390e50fcafd162fb8\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64af9a013fda6d7f065a66fb\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6434cfcc90e50fcafd7a031f\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"658254b9939a5f4082bbdf51\"}',\n",
       " '{\"chunk_id\":\"8\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65ea79e913fb2c6cf621b5f2\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223932\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"658b941f939a5f40825dc6b6\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"63dcdb422c26941cf00b61fa\"}',\n",
       " '{\"paper_title\":\"Revisiting Data Augmentation in Deep Reinforcement Learning\", \"chunk_id\":\"4\", \"paper_id\":\"65d41682939a5f4082e16473\"}',\n",
       " '{\"chunk_id\":\"3\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6694828f01d2a3fbfc8653d6\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"6302f3ab90e50fcafd5b318f\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"647eaf51d68f896efad41dca\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"638eb2eb90e50fcafd58a97a\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"63292f6990e50fcafd2ebdc6\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"3\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"6449f232582c1376bb223871\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"6441ff2ded329dcc6bb745d5\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"paper_title\":\"Interpretable by Design: Learning Predictors by Composing Interpretable Queries\", \"chunk_id\":\"5\", \"paper_id\":\"6456ee93d68f896efa58f3e1\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"615fb6ef5244ab9dcb9c3c26\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"628afb4c5aee126c0f04e4a6\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"6209c8265aee126c0f1e81ff\"}',\n",
       " '{\"chunk_id\":\"12\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"627483fa5aee126c0f07e162\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"625f6bf65aee126c0ffb35b5\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64af99fd3fda6d7f065a6347\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"635b486890e50fcafd32fae3\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"634e194790e50fcafd24f328\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"6296d90f5aee126c0f730c91\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"669729b401d2a3fbfc786fc2\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}',\n",
       " '{\"paper_title\": \"A Projection-free Algorithm for Constrained Stochastic Multi-level Composition Optimization\", \"chunk_id\": \"1\", \"paper_id\": \"6204827f5aee126c0f77da60\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6602562413fb2c6cf62a56f6\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"64225b7690e50fcafde125e2\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}',\n",
       " '{\"paper_title\":\"Rethinking the Effectiveness of Graph Classification Datasets in Benchmarks for Assessing GNNs\", \"chunk_id\":\"1\", \"paper_id\":\"668c9e3b01d2a3fbfc3942a7\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"626603225aee126c0f23378b\"}',\n",
       " '{\"chunk_id\":\"9\", \"paper_id\":\"657a6ad2939a5f4082cf7d8b\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}',\n",
       " '{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"4\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"647eaf51d68f896efad41dca\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"6449f232582c1376bb223932\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"5f51b90d9fced0a24bdc7f82\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6602894513fb2c6cf6d810ab\"}',\n",
       " '{\"chunk_id\":\"123456\", \"paper_id\":\"abc123\"}',\n",
       " '{\"paper_title\":\"Simplifying Model-based RL: Learning Representations, Latent-space Models, and Policies with One Objective\", \"chunk_id\":\"6\", \"paper_id\":\"63292f6890e50fcafd2eba3a\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"64563874d68f896efacf3e48\"}']"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "citations = extract_citations(content)\n",
    "citations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "未找到引用对应的论文信息，引用内容：{\"chunk_id\":\"123456\", \"paper_id\":\"abc123\"}\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{1: 'Prior Mismatch and Adaptation in PnP-ADMM with a Nonconvex Convergence Analysis ICML2024 chunk_2',\n",
       " 2: 'Less is More: One-shot Subgraph Reasoning on Large-scale Knowledge Graphs. ICLR2024 chunk_10',\n",
       " 3: 'Sparse Interaction Additive Networks Via Feature Interaction Detection and Sparse Selection. NeurIPS_2022_Neural_Information_Processing_Systems chunk_4',\n",
       " 4: 'Attention Prompting on Image for Large Vision-Language Models ECCV2024 chunk_9',\n",
       " 5: 'Hierarchical Adaptive Pooling by Capturing High-order Dependency for Graph Representation Learning IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_3',\n",
       " 6: 'Attribute-Consistent Knowledge Graph Representation Learning for Multi-Modal Entity Alignment WWW_2023_ chunk_2',\n",
       " 7: 'Predictive Querying for Autoregressive Neural Sequence Models. NeurIPS_2022_Neural_Information_Processing_Systems chunk_5',\n",
       " 8: 'Hierarchical Self-Attention Embedding for Temporal Knowledge Graph Completion. WWW_2023_ chunk_1',\n",
       " 9: 'DAFA: Distance-Aware Fair Adversarial Training ICLR2024 chunk_3',\n",
       " 10: 'Graphon Based Clustering and Testing of Networks: Algorithms and Theory ICLR_2022_International_Conference_on_Learning_Representation chunk_6',\n",
       " 11: 'GraphCSPN: Geometry-Aware Depth Completion via Dynamic GCNs. ECCV_2022_European_Conference_on_Computer_Vision chunk_10',\n",
       " 12: 'Knowledge Graph Prompting for Multi-Document Question Answering AAAI2024 chunk_7',\n",
       " 13: 'Dual Semantic Knowledge Composed Multimodal Dialog Systems SIGIR2023 chunk_1',\n",
       " 14: 'Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning Artificial_Intelligence chunk_13',\n",
       " 15: 'FAITH: Few-Shot Graph Classification with Hierarchical Task Graphs IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence chunk_1',\n",
       " 16: 'Do Question Answering Modeling Improvements Hold Across Benchmarks? ACL_2023 chunk_4',\n",
       " 17: 'Inductive Logical Query Answering in Knowledge Graphs. NeurIPS_2022_Neural_Information_Processing_Systems chunk_2',\n",
       " 18: 'Attribute-Consistent Knowledge Graph Representation Learning for Multi-Modal Entity Alignment WWW_2023_ chunk_6',\n",
       " 19: 'On Evaluation Metrics for Graph Generative Models. ICLR_2022_International_Conference_on_Learning_Representation chunk_5',\n",
       " 20: 'VQGraph: Rethinking Graph Representation Space for Bridging GNNs and MLPs ICLR2024 chunk_1',\n",
       " 21: 'Taking a Step Back with KCal: Multi-Class Kernel-Based Calibration for   Deep Neural Networks ICLR_2023 chunk_3',\n",
       " 22: 'Towards Robust Knowledge Graph Embedding Via Multi-Task Reinforcement Learning IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_0',\n",
       " 23: 'Temporal Knowledge Graph Forecasting Without Knowledge Using In-Context Learning. EMNLP_2023 chunk_0',\n",
       " 24: 'On Convergence of Incremental Gradient for Non-Convex Smooth Functions ICML2024 chunk_3',\n",
       " 25: 'Distributed Distributionally Robust Optimization with Non-Convex Objectives NeurIPS_2022_Neural_Information_Processing_Systems chunk_12',\n",
       " 26: 'Augmenting Recurrent Graph Neural Networks with a Cache KDD2023 chunk_1',\n",
       " 27: 'Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning Artificial_Intelligence chunk_2',\n",
       " 28: 'DREAM: Adaptive Reinforcement Learning Based on Attention Mechanism for Temporal Knowledge Graph Reasoning SIGIR2023 chunk_1',\n",
       " 29: 'A Dual-way Enhanced Framework from Text Matching Point of View for Multimodal Entity Linking AAAI2024 chunk_6',\n",
       " 30: 'Graph Neural Networks in Network Neuroscience IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_8',\n",
       " 31: 'On the Topology Awareness and Generalization Performance of Graph Neural Networks ECCV2024 chunk_1',\n",
       " 32: 'Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning WWW_2023_ chunk_1',\n",
       " 33: 'TEILP: Time Prediction over Knowledge Graphs Via Logical Reasoning AAAI2024 chunk_4',\n",
       " 34: 'Ordered GNN: Ordering Message Passing to Deal with Heterophily and Over-smoothing ICLR_2023 chunk_1',\n",
       " 35: 'Revisiting Data Augmentation in Deep Reinforcement Learning ICLR2024 chunk_4',\n",
       " 36: 'GUITAR: Gradient Pruning Toward Fast Neural Ranking SIGIR2024 chunk_3',\n",
       " 37: 'GUITAR: Gradient Pruning Toward Fast Neural Ranking SIGIR2024 chunk_1',\n",
       " 38: 'Graph Neural Networks in Network Neuroscience IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_10',\n",
       " 39: 'Provably Expressive Temporal Graph Networks. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1',\n",
       " 40: 'FairDomain: Achieving Fairness in Cross-Domain Medical Image Segmentation and Classification ECCV2024 chunk_1',\n",
       " 41: 'Inductive Logical Query Answering in Knowledge Graphs. NeurIPS_2022_Neural_Information_Processing_Systems chunk_0',\n",
       " 42: 'GreenKGC: A Lightweight Knowledge Graph Completion Method ACL_2023 chunk_5',\n",
       " 43: 'MM-DAG: Multi-task DAG Learning for Multi-modal Data - with Application for Traffic Congestion Analysis. KDD2023 chunk_5',\n",
       " 44: 'Generalizing Multiple Object Tracking to Unseen Domains by Introducing Natural Language Representation AAAI_2023 chunk_7',\n",
       " 45: 'Rethinking Knowledge Graph Evaluation under the Open-World Assumption. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1',\n",
       " 46: 'Attribute-Consistent Knowledge Graph Representation Learning for Multi-Modal Entity Alignment WWW_2023_ chunk_1',\n",
       " 47: 'Graph Neural Networks in Network Neuroscience IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_6',\n",
       " 48: 'Attribute-Consistent Knowledge Graph Representation Learning for Multi-Modal Entity Alignment WWW_2023_ chunk_3',\n",
       " 49: 'An Attentional Multi-scale Co-evolving Model for Dynamic Link Prediction WWW_2023_ chunk_4',\n",
       " 50: 'Decouple Graph Neural Networks: Train Multiple Simple GNNs Simultaneously Instead of One IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_6',\n",
       " 51: 'Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning Artificial_Intelligence chunk_1',\n",
       " 52: 'Interpretable by Design: Learning Predictors by Composing Interpretable Queries IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_5',\n",
       " 53: 'GNN is a Counter? Revisiting GNN for Question Answering ICLR_2022_International_Conference_on_Learning_Representation chunk_1',\n",
       " 54: 'Neural-Symbolic Models for Logical Queries on Knowledge Graphs. ICML_2022_International_Conference_on_Machine_Learning chunk_1',\n",
       " 55: 'Rethinking Graph Convolutional Networks in Knowledge Graph Completion WWW_2022_The_Web_Conference chunk_5',\n",
       " 56: 'Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_12',\n",
       " 57: 'FAITH: Few-Shot Graph Classification with Hierarchical Task Graphs IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence chunk_6',\n",
       " 58: 'Multi-level Cross-view Contrastive Learning for Knowledge-aware Recommender System SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_2',\n",
       " 59: 'MixupExplainer: Generalizing Explanations for Graph Neural Networks with Data Augmentation KDD2023 chunk_1',\n",
       " 60: 'Gendered Mental Health Stigma in Masked Language Models EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing chunk_4',\n",
       " 61: 'MoSE: Modality Split and Ensemble for Multimodal Knowledge Graph Completion. EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing chunk_5',\n",
       " 62: 'Label-Enhanced Graph Neural Network for Semi-Supervised Node Classification IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_7',\n",
       " 63: 'Learning Semantic Latent Directions for Accurate and Controllable Human Motion Prediction ECCV2024 chunk_5',\n",
       " 64: 'Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_0',\n",
       " 65: 'A Projection-free Algorithm for Constrained Stochastic Multi-level Composition Optimization NeurIPS_2022_Neural_Information_Processing_Systems chunk_1',\n",
       " 66: 'Graph Reasoning Transformers for Knowledge-Aware Question Answering AAAI2024 chunk_0',\n",
       " 67: 'Mutually-paced Knowledge Distillation for Cross-lingual Temporal Knowledge Graph Reasoning WWW_2023_ chunk_2',\n",
       " 68: 'A Variational Edge Partition Model for Supervised Graph Representation Learning. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1',\n",
       " 69: 'Rethinking the Effectiveness of Graph Classification Datasets in Benchmarks for Assessing GNNs IJCAI2024 chunk_1',\n",
       " 70: 'Incorporating Explicit Knowledge in Pre-trained Language Models for Passage Re-ranking SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_0',\n",
       " 71: 'Graph Neural Networks in Network Neuroscience IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_7',\n",
       " 72: 'Provably Expressive Temporal Graph Networks. NeurIPS_2022_Neural_Information_Processing_Systems chunk_7',\n",
       " 73: 'Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_1',\n",
       " 74: 'Geodesic Graph Neural Network for Efficient Graph Representation Learning. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1',\n",
       " 75: 'Hypergraph Transformer: Weakly-supervised Multi-hop Reasoning for Knowledge-based Visual Question Answering ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_2',\n",
       " 76: 'Curriculum-Enhanced Residual Soft An-Isotropic Normalization for Over-smoothness in Deep GNNs AAAI2024 chunk_9',\n",
       " 77: 'GUITAR: Gradient Pruning Toward Fast Neural Ranking SIGIR2024 chunk_2',\n",
       " 78: 'On Evaluation Metrics for Graph Generative Models. ICLR_2022_International_Conference_on_Learning_Representation chunk_4',\n",
       " 79: 'Adaptive Path-Memory Network for Temporal Knowledge Graph Reasoning IJCAI2023 chunk_1',\n",
       " 80: 'MM-DAG: Multi-task DAG Learning for Multi-modal Data - with Application for Traffic Congestion Analysis. KDD2023 chunk_4',\n",
       " 81: 'Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning WWW_2023_ chunk_6',\n",
       " 82: 'Contactless Seismocardiography Via Deep Learning Radars. MobiCom_2023 chunk_10',\n",
       " 83: 'Attribute-Consistent Knowledge Graph Representation Learning for Multi-Modal Entity Alignment WWW_2023_ chunk_0',\n",
       " 84: 'Wikidata As a Seed for Web Extraction WWW_2023_ chunk_1',\n",
       " 85: 'KGDM: A Diffusion Model to Capture Multiple Relation Semantics for Knowledge Graph Embedding AAAI2024 chunk_1',\n",
       " 86: 'Simplifying Model-based RL: Learning Representations, Latent-space   Models, and Policies with One Objective ICLR_2023 chunk_6',\n",
       " 87: 'EvoluNet: Advancing Dynamic Non-IID Transfer Learning on Graphs ICML2024 chunk_0'}"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "citation_to_ids = {}\n",
    "ids = []\n",
    "temp_dic = {}\n",
    "\n",
    "for citation in citations:\n",
    "    try:\n",
    "        citation_content = citation\n",
    "        citation_data = json.loads(json_repair.repair_json(citation))  # 修复和解析 JSON\n",
    "        query_c = await query.query_by_id(citation_data[\"paper_id\"], top_k=100)\n",
    "\n",
    "        if not query_c:  # 如果查询不到结果，跳过该引用\n",
    "            logger.warning(\"未找到引用对应的论文信息，引用内容：%s\", citation)\n",
    "            continue\n",
    "\n",
    "        paper_info = query_c[0]\n",
    "        id = paper_info[\"paper_id\"]\n",
    "        title = paper_info[\"paper_title\"]\n",
    "        chunk_id = citation_data[\"chunk_id\"]\n",
    "        original_filename = re.findall(r\"Data_+(.*?)_with\", paper_info[\"original_filename\"])\n",
    "\n",
    "        temp_dic[citation_content] = f\"{title} {original_filename[0]} chunk_{chunk_id}\"\n",
    "        citation_to_ids[citation_content] = len(ids) + 1  # 将引用映射到编号\n",
    "        ids.append(citation_content)\n",
    "\n",
    "    except Exception as e:\n",
    "        logger.error(\"处理引用 %s 时出错: %s\", citation, str(e))\n",
    "        continue  # 若出现任何错误，跳过该引用\n",
    "\n",
    "number_to_title = {idx + 1: temp_dic[citation] for idx, citation in enumerate(ids)}\n",
    "number_to_title"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "re_a = re.findall(r'<sup>(.*?)</sup>', content)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['{\"chunk_id\":\"12\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"3\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"6449f232582c1376bb223871\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"658254b9939a5f4082bbdf51\"}',\n",
       " '{\"chunk_id\":\"12\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"64225b7690e50fcafde125e2\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb22393e\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223932\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"6449f232582c1376bb223932\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6434cfcc90e50fcafd7a031f\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb22393e\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6466fafbd68f896efaeb752d\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223932\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6602562413fb2c6cf62a56f6\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"6449f232582c1376bb223932\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"5f51b90d9fced0a24bdc7f82\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6694828f01d2a3fbfc8653d6\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64659ad1d68f896efa8751ac\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"615fb6ef5244ab9dcb9c3c26\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"6019400391e0110e3bb2c05c\"}',\n",
       " '{\"chunk_id\":\"3\", \"paper_id\":\"6076b8fc91e0113d725742e7\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"626603225aee126c0f23378b\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"6019400391e0110e3bb2c05c\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64af99fd3fda6d7f065a6347\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64af9a013fda6d7f065a66fb\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65ea79e913fb2c6cf621b5f2\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"63dcdb422c26941cf00b61fa\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64d074bf3fda6d7f06ce927c\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}',\n",
       " '{\"chunk_id\":\"3\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"647eaf51d68f896efad41dca\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"647eaf51d68f896efad41dca\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"8\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}',\n",
       " '{\"chunk_id\":\"9\", \"paper_id\":\"657a6ad2939a5f4082cf7d8b\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"6350bc6e90e50fcafdecf356\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6409167e90e50fcafd973ce1\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6602894513fb2c6cf6d810ab\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}',\n",
       " '{\"chunk_id\":\"123456\", \"paper_id\":\"abc123\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"6441ff2ded329dcc6bb745d5\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"6296d90f5aee126c0f730c91\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64af99fd3fda6d7f065a6347\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"64af9a013fda6d7f065a66fb\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65ea79e913fb2c6cf621b5f2\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"628afb4c5aee126c0f04e4a6\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"625f6bf65aee126c0ffb35b5\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"63292f6990e50fcafd2ebdc6\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"6209c8265aee126c0f1e81ff\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"chunk_id\":\"13\", \"paper_id\":\"64fa84403fda6d7f06700712\"}',\n",
       " '{\"chunk_id\":\"2\", \"paper_id\":\"64225b7690e50fcafde125e2\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"658b941f939a5f40825dc6b6\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"632a810e90e50fcafd081ac0\"}',\n",
       " '{\"chunk_id\":\"9\", \"paper_id\":\"66f4cd3401d2a3fbfcbfac37\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"669729b401d2a3fbfc786fc2\"}',\n",
       " '{\"chunk_id\":\"4\", \"paper_id\":\"635b486890e50fcafd32fae3\"}',\n",
       " '{\"chunk_id\":\"7\", \"paper_id\":\"638eb2eb90e50fcafd58a97a\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"634e194790e50fcafd24f328\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}',\n",
       " '{\"chunk_id\":\"5\", \"paper_id\":\"6302f3ab90e50fcafd5b318f\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}',\n",
       " '{\"chunk_id\":\"0\", \"paper_id\":\"64563874d68f896efacf3e48\"}',\n",
       " '{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e162\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"615e657b5244ab9dcbf21f44\"}',\n",
       " '{\"chunk_id\":\"6\", \"paper_id\":\"627483fa5aee126c0f07e162\"}',\n",
       " '{\"chunk_id\":\"10\", \"paper_id\":\"65ea8bc413fb2c6cf6308c3b\"}',\n",
       " '{\"paper_title\":\"Rethinking the Effectiveness of Graph Classification Datasets in Benchmarks for Assessing GNNs\", \"chunk_id\":\"1\", \"paper_id\":\"668c9e3b01d2a3fbfc3942a7\"}',\n",
       " '{\"paper_title\":\"DAFA: Distance-Aware Fair Adversarial Training\", \"chunk_id\":\"3\", \"paper_id\":\"65b077f1939a5f4082b1af4f\"}',\n",
       " '{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"4\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}',\n",
       " '{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"5\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}',\n",
       " '{\"paper_title\":\"Interpretable by Design: Learning Predictors by Composing Interpretable Queries\", \"chunk_id\":\"5\", \"paper_id\":\"6456ee93d68f896efa58f3e1\"}',\n",
       " '{\"paper_title\": \"Prior Mismatch and Adaptation in PnP-ADMM with a Nonconvex Convergence Analysis\", \"chunk_id\": \"2\", \"paper_id\": \"651b79a33fda6d7f0628d3b7\"}',\n",
       " '{\"paper_title\": \"On Convergence of Incremental Gradient for Non-Convex Smooth Functions\", \"chunk_id\": \"3\", \"paper_id\": \"6476d21ad68f896efaf72f93\"}',\n",
       " '{\"paper_title\": \"A Projection-free Algorithm for Constrained Stochastic Multi-level Composition Optimization\", \"chunk_id\": \"1\", \"paper_id\": \"6204827f5aee126c0f77da60\"}',\n",
       " '{\"paper_title\": \"Distributed Distributionally Robust Optimization with Non-Convex Objectives\", \"chunk_id\": \"12\", \"paper_id\": \"634cc7a390e50fcafd162fb8\"}',\n",
       " '{\"paper_title\":\"Predictive Querying for Autoregressive Neural Sequence Models\", \"chunk_id\":\"5\", \"paper_id\":\"6347820490e50fcafd2c4279\"}',\n",
       " '{\"paper_title\":\"Revisiting Data Augmentation in Deep Reinforcement Learning\", \"chunk_id\":\"4\", \"paper_id\":\"65d41682939a5f4082e16473\"}',\n",
       " '{\"paper_title\":\"Simplifying Model-based RL: Learning Representations, Latent-space Models, and Policies with One Objective\", \"chunk_id\":\"6\", \"paper_id\":\"63292f6890e50fcafd2eba3a\"}',\n",
       " '{\"paper_title\":\"Taking a Step Back with KCal: Multi-Class Kernel-Based Calibration for Deep Neural Networks\", \"chunk_id\":\"3\", \"paper_id\":\"620dbcf95aee126c0f5db21c\"}']"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "re_a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "未找到匹配的引用: {\"chunk_id\":\"123456\", \"paper_id\":\"abc123\"}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "56\n",
      "{\"chunk_id\":\"12\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}\n",
      "46\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}\n",
      "46\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}\n",
      "83\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}\n",
      "6\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}\n",
      "48\n",
      "{\"chunk_id\":\"3\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}\n",
      "18\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"642ce6fa90e50fcafde75922\"}\n",
      "49\n",
      "{\"chunk_id\":\"4\", \"paper_id\":\"6449f232582c1376bb223871\"}\n",
      "29\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"658254b9939a5f4082bbdf51\"}\n",
      "56\n",
      "{\"chunk_id\":\"12\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}\n",
      "51\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}\n",
      "67\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"64225b7690e50fcafde125e2\"}\n",
      "79\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}\n",
      "8\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb22393e\"}\n",
      "32\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223932\"}\n",
      "79\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}\n",
      "81\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"6449f232582c1376bb223932\"}\n",
      "51\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}\n",
      "79\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}\n",
      "28\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6434cfcc90e50fcafd7a031f\"}\n",
      "8\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb22393e\"}\n",
      "23\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"6466fafbd68f896efaeb752d\"}\n",
      "32\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223932\"}\n",
      "51\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}\n",
      "72\n",
      "{\"chunk_id\":\"7\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}\n",
      "66\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"6602562413fb2c6cf62a56f6\"}\n",
      "81\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"6449f232582c1376bb223932\"}\n",
      "38\n",
      "{\"chunk_id\":\"10\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}\n",
      "82\n",
      "{\"chunk_id\":\"10\", \"paper_id\":\"5f51b90d9fced0a24bdc7f82\"}\n",
      "40\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6694828f01d2a3fbfc8653d6\"}\n",
      "13\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64659ad1d68f896efa8751ac\"}\n",
      "41\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}\n",
      "73\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}\n",
      "12\n",
      "{\"chunk_id\":\"7\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}\n",
      "53\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"615fb6ef5244ab9dcb9c3c26\"}\n",
      "16\n",
      "{\"chunk_id\":\"4\", \"paper_id\":\"6019400391e0110e3bb2c05c\"}\n",
      "5\n",
      "{\"chunk_id\":\"3\", \"paper_id\":\"6076b8fc91e0113d725742e7\"}\n",
      "75\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"626603225aee126c0f23378b\"}\n",
      "16\n",
      "{\"chunk_id\":\"4\", \"paper_id\":\"6019400391e0110e3bb2c05c\"}\n",
      "59\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64af99fd3fda6d7f065a6347\"}\n",
      "74\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}\n",
      "68\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}\n",
      "26\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64af9a013fda6d7f065a66fb\"}\n",
      "31\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"65ea79e913fb2c6cf621b5f2\"}\n",
      "74\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}\n",
      "68\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}\n",
      "34\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"63dcdb422c26941cf00b61fa\"}\n",
      "20\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64d074bf3fda6d7f06ce927c\"}\n",
      "37\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}\n",
      "77\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}\n",
      "36\n",
      "{\"chunk_id\":\"3\", \"paper_id\":\"658e4adc939a5f4082dbe4b6\"}\n",
      "80\n",
      "{\"chunk_id\":\"4\", \"paper_id\":\"647eaf51d68f896efad41dca\"}\n",
      "43\n",
      "{\"chunk_id\":\"5\", \"paper_id\":\"647eaf51d68f896efad41dca\"}\n",
      "47\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}\n",
      "71\n",
      "{\"chunk_id\":\"7\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}\n",
      "30\n",
      "{\"chunk_id\":\"8\", \"paper_id\":\"60c1af8391e0112cf43c21d3\"}\n",
      "76\n",
      "{\"chunk_id\":\"9\", \"paper_id\":\"657a6ad2939a5f4082cf7d8b\"}\n",
      "11\n",
      "{\"chunk_id\":\"10\", \"paper_id\":\"6350bc6e90e50fcafdecf356\"}\n",
      "70\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}\n",
      "84\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}\n",
      "22\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"6409167e90e50fcafd973ce1\"}\n",
      "73\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}\n",
      "85\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6602894513fb2c6cf6d810ab\"}\n",
      "12\n",
      "{\"chunk_id\":\"7\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}\n",
      "None\n",
      "{\"chunk_id\":\"123456\", \"paper_id\":\"abc123\"}\n",
      "50\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"6441ff2ded329dcc6bb745d5\"}\n",
      "62\n",
      "{\"chunk_id\":\"7\", \"paper_id\":\"6296d90f5aee126c0f730c91\"}\n",
      "59\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64af99fd3fda6d7f065a6347\"}\n",
      "74\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}\n",
      "68\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}\n",
      "26\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"64af9a013fda6d7f065a66fb\"}\n",
      "31\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"65ea79e913fb2c6cf621b5f2\"}\n",
      "74\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}\n",
      "68\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"6201df4d5aee126c0f64e2da\"}\n",
      "54\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"628afb4c5aee126c0f04e4a6\"}\n",
      "17\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}\n",
      "58\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"625f6bf65aee126c0ffb35b5\"}\n",
      "45\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"63292f6990e50fcafd2ebdc6\"}\n",
      "55\n",
      "{\"chunk_id\":\"5\", \"paper_id\":\"6209c8265aee126c0f1e81ff\"}\n",
      "27\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"64fa84403fda6d7f06700712\"}\n",
      "14\n",
      "{\"chunk_id\":\"13\", \"paper_id\":\"64fa84403fda6d7f06700712\"}\n",
      "67\n",
      "{\"chunk_id\":\"2\", \"paper_id\":\"64225b7690e50fcafde125e2\"}\n",
      "33\n",
      "{\"chunk_id\":\"4\", \"paper_id\":\"658b941f939a5f40825dc6b6\"}\n",
      "3\n",
      "{\"chunk_id\":\"4\", \"paper_id\":\"632a810e90e50fcafd081ac0\"}\n",
      "4\n",
      "{\"chunk_id\":\"9\", \"paper_id\":\"66f4cd3401d2a3fbfcbfac37\"}\n",
      "63\n",
      "{\"chunk_id\":\"5\", \"paper_id\":\"669729b401d2a3fbfc786fc2\"}\n",
      "60\n",
      "{\"chunk_id\":\"4\", \"paper_id\":\"635b486890e50fcafd32fae3\"}\n",
      "44\n",
      "{\"chunk_id\":\"7\", \"paper_id\":\"638eb2eb90e50fcafd58a97a\"}\n",
      "61\n",
      "{\"chunk_id\":\"5\", \"paper_id\":\"634e194790e50fcafd24f328\"}\n",
      "73\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}\n",
      "64\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}\n",
      "42\n",
      "{\"chunk_id\":\"5\", \"paper_id\":\"6302f3ab90e50fcafd5b318f\"}\n",
      "39\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}\n",
      "73\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}\n",
      "64\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}\n",
      "87\n",
      "{\"chunk_id\":\"0\", \"paper_id\":\"64563874d68f896efacf3e48\"}\n",
      "15\n",
      "{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e162\"}\n",
      "10\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"615e657b5244ab9dcbf21f44\"}\n",
      "57\n",
      "{\"chunk_id\":\"6\", \"paper_id\":\"627483fa5aee126c0f07e162\"}\n",
      "2\n",
      "{\"chunk_id\":\"10\", \"paper_id\":\"65ea8bc413fb2c6cf6308c3b\"}\n",
      "69\n",
      "{\"paper_title\":\"Rethinking the Effectiveness of Graph Classification Datasets in Benchmarks for Assessing GNNs\", \"chunk_id\":\"1\", \"paper_id\":\"668c9e3b01d2a3fbfc3942a7\"}\n",
      "9\n",
      "{\"paper_title\":\"DAFA: Distance-Aware Fair Adversarial Training\", \"chunk_id\":\"3\", \"paper_id\":\"65b077f1939a5f4082b1af4f\"}\n",
      "78\n",
      "{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"4\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}\n",
      "19\n",
      "{\"paper_title\":\"On Evaluation Metrics for Graph Generative Models.\", \"chunk_id\":\"5\", \"paper_id\":\"61ef6aee5244ab9dcb688e34\"}\n",
      "52\n",
      "{\"paper_title\":\"Interpretable by Design: Learning Predictors by Composing Interpretable Queries\", \"chunk_id\":\"5\", \"paper_id\":\"6456ee93d68f896efa58f3e1\"}\n",
      "1\n",
      "{\"paper_title\": \"Prior Mismatch and Adaptation in PnP-ADMM with a Nonconvex Convergence Analysis\", \"chunk_id\": \"2\", \"paper_id\": \"651b79a33fda6d7f0628d3b7\"}\n",
      "24\n",
      "{\"paper_title\": \"On Convergence of Incremental Gradient for Non-Convex Smooth Functions\", \"chunk_id\": \"3\", \"paper_id\": \"6476d21ad68f896efaf72f93\"}\n",
      "65\n",
      "{\"paper_title\": \"A Projection-free Algorithm for Constrained Stochastic Multi-level Composition Optimization\", \"chunk_id\": \"1\", \"paper_id\": \"6204827f5aee126c0f77da60\"}\n",
      "25\n",
      "{\"paper_title\": \"Distributed Distributionally Robust Optimization with Non-Convex Objectives\", \"chunk_id\": \"12\", \"paper_id\": \"634cc7a390e50fcafd162fb8\"}\n",
      "7\n",
      "{\"paper_title\":\"Predictive Querying for Autoregressive Neural Sequence Models\", \"chunk_id\":\"5\", \"paper_id\":\"6347820490e50fcafd2c4279\"}\n",
      "35\n",
      "{\"paper_title\":\"Revisiting Data Augmentation in Deep Reinforcement Learning\", \"chunk_id\":\"4\", \"paper_id\":\"65d41682939a5f4082e16473\"}\n",
      "86\n",
      "{\"paper_title\":\"Simplifying Model-based RL: Learning Representations, Latent-space Models, and Policies with One Objective\", \"chunk_id\":\"6\", \"paper_id\":\"63292f6890e50fcafd2eba3a\"}\n",
      "21\n",
      "{\"paper_title\":\"Taking a Step Back with KCal: Multi-Class Kernel-Based Calibration for Deep Neural Networks\", \"chunk_id\":\"3\", \"paper_id\":\"620dbcf95aee126c0f5db21c\"}\n"
     ]
    }
   ],
   "source": [
    "# individual_citations = re_a.split(';')\n",
    "numbered_citations = []\n",
    "\n",
    "for citation in re_a:\n",
    "    citation_number = citation_to_ids.get(citation)\n",
    "    print(citation_number)\n",
    "    print(citation)\n",
    "    if citation_number:\n",
    "        numbered_citations.append(f\"{citation_number}\")\n",
    "        # print(re.sub(citation, f\"{citation_number}\", content))\n",
    "    else:\n",
    "        # 记录未匹配的引用\n",
    "        logger.warning(\"未找到匹配的引用: %s\", citation)\n",
    "        numbered_citations.append(f\"X\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "未找到匹配的引用: {\"chunk_id\":\"123456\", \"paper_id\":\"abc123\"}\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "#2 Multi-Modal Knowledge Graphs\n",
      "## 2.1 Multi-Modal Knowledge Graph Construction\n",
      "\n",
      "The construction of multi-modal knowledge graphs (MMKGs) involves integrating text and image data to create a more comprehensive representation of entities and their relationships, enabling various downstream applications such as question answering and recommendation systems.<sup>56</sup> Recent studies have explored different approaches to construct MMKGs, including methods that align entities and relations across different modalities and techniques for fusing heterogeneous features.<sup>46</sup>\n",
      "\n",
      "One such approach is the Attribute-Consistent Knowledge Graph Representation Learning for Multi-Modal Entity Alignment (ACK-MMEA) framework, which addresses the issue of contextual gaps in MMKGs<sup>46</sup>. ACK-MMEA proposes a multi-modal attribute uniformization method to derive attribute-consistent KGs (ACKGs), ensuring that each entity possesses only one attribute for each modality<sup>83</sup><sup>6</sup><sup>48</sup><sup>18</sup>.\n",
      "\n",
      "Another notable approach is the MKGformer model, which introduces a hybrid transformer framework for unified multimodal knowledge graph completion. MKGformer implements multi-level fusion with coarse-grained prefix-guided interaction and fine-grained correlation-aware fusion modules to reduce modal heterogeneity and alleviate noise from irrelevant visual elements. This model has demonstrated state-of-the-art performance on multimodal link prediction, multimodal named entity recognition, and multimodal relation extraction tasks<sup>49</sup><sup>29</sup>.\n",
      "\n",
      "In summary, the construction of multi-modal knowledge graphs is an ongoing area of research, with recent studies focusing on aligning entities and relations across modalities and fusing heterogeneous features.<sup>56</sup> The development of more advanced techniques in this area holds great potential for enhancing the coverage and depth of knowledge graphs, enabling a wide range of downstream applications.\n",
      "\n",
      "\n",
      "## 2.2 Multi-Modal Knowledge Graph Applications\n",
      "\n",
      "Multi-modal knowledge graphs (MMKGs) have found extensive applications in various domains, including question answering, recommendation systems, and passage re-ranking. MMKGs provide sufficient background knowledge to enrich the representation of entities and concepts, especially for the long-tail ones, and enable the understanding of unseen objects in images. They also facilitate multi-modal reasoning and provide additional features to bridge information gaps in NLP tasks. The construction of MMKGs involves associating symbolic knowledge with corresponding images, either by labeling images with symbols or by grounding symbols to images. This survey on MMKGs highlights their importance and potential in enhancing multi-modal tasks.\n",
      "\n",
      "### 2.2.1 Question Answering\n",
      "\n",
      "Question answering systems have significantly benefited from the incorporation of MMKGs. By integrating visual information with textual data, these systems can provide more informative and context-rich answers to user queries. For instance, ReSee demonstrates that visual knowledge can complement existing textual knowledge, even in the presence of document knowledge, enhancing model performance and diversity in responses. Additionally, Text-IF illustrates the leverage of semantic text guidance for interactive image fusion, which could be applied to enhance dialogue systems. Further, works like Bridging the visual semantic gap in VLN via semantically richer instructions show improvements in using visual information for navigation, which can be analogous to enhancing dialogue systems.\n",
      "\n",
      "### 2.2.2 Recommendation Systems\n",
      "\n",
      "MMKGs have also proven to be valuable in recommendation systems. Multi-modal knowledge graphs enhance multi-modal recommender systems by providing external MMKGs that can enrich item representations and help solve the cold-start problem in collaborative filtering based strategies, leading to more personalized and explainable recommendations. By incorporating visual data, these systems can offer more personalized and relevant recommendations to users. The system utilizes visual features extracted from product images to enhance the recommendation process, resulting in more accurate and tailored suggestions.\n",
      "\n",
      "### 2.2.3 Passage Re-ranking\n",
      "\n",
      "Passage re-ranking tasks have also benefited from the use of MMKGs, as demonstrated by the improvement in cross-task generalization ability using retrieval augmentation and the potential of leveraging LLMs for retrieval tasks. MMKGs provide background knowledge to enrich entity and concept representation, enable understanding of unseen objects, facilitate multi-modal reasoning, and bridge information gaps in NLP tasks. Additionally, learning to rank in generative retrieval has shown to enhance passage ranking performance. By integrating visual data with textual information, these systems can generate more context-aware and relevant rankings of passages. For instance, the ReSee system demonstrates that visual knowledge can complement existing textual knowledge, enhancing model performance and diversity in responses. Furthermore, the ReSee system's evaluation metrics highlight the importance of informativeness and relevance in generated responses, which are crucial for achieving context-aware rankings. Additionally, research in image-text embedding learning, such as the work by Li et al., underscores the semantic reasoning capabilities that contribute to aligning visual and textual information, potentially improving the context-awareness of ranking systems.\n",
      "\n",
      "In summary, multi-modal knowledge graphs have found extensive applications in various domains, including question answering, recommendation systems, and passage re-ranking. These applications leverage the integration of text and image data to provide more comprehensive and accurate results. Ongoing research is focused on developing more advanced techniques to enhance the performance and applicability of MMKGs in these domains.\n",
      "\n",
      "[9] Z. Wang, et al., 'Multi-Modal Knowledge Graph Construction and Application: A Survey', arXiv preprint arXiv:2209.15023, 2022.\n",
      "[10] K. Duan, et al., 'MKGformer: A Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion', arXiv preprint arXiv:2209.15023, 2022.\n",
      "[11] Z. Wang, et al., 'Multi-Modal Knowledge Graph Construction and Application: A Survey', arXiv preprint arXiv:2209.15023, 2022.\n",
      "\n",
      "#3 Temporal Knowledge Graphs\n",
      "## 3 Temporal Knowledge Graphs\n",
      "\n",
      "### 3.1 Temporal Knowledge Graph Reasoning\n",
      "\n",
      "Temporal knowledge graphs (TKGs) represent a dynamic and evolving form of knowledge graph that incorporates temporal information<sup>51</sup><sup>67</sup><sup>79</sup><sup>8</sup>. The reasoning over TKGs involves predicting missing facts and making inferences about the future based on historical data. TKGs are dynamic multirelational graph data used to record evolutionary events and knowledge in the real world, with each fact represented by a quadruple $(s,r,o,t)$, such as (Obama, run for, president, 2012). Reasoning over TKGs primarily has two settings: interpolation and extrapolation. Interpolation aims at inferring missing facts that occur at time $t$, where $t_{0}<t<t_{n}$, while extrapolation attempts to predict facts that occur at time $t$ with $t>t_{n}$. For accurately inferring a future fact, it is common to consider the long-ago history related to the fact and recent events because they carry important long- and short-term time dependencies for prediction. Existing methods for TKG reasoning, such as Know-Evolve, RE-NET, and RE-GCN, have limitations in modeling long- and short-term temporal dependencies, particularly in ignoring the explicit dependencies between different entities at different timestamps in long-term history. Furthermore, they overlook the adaptive integration of long- and short-term information. To address these challenges, various approaches have been proposed, including the Hierarchical Relational Graph Neural Network (HGLS) and the Adaptive Path-Memory Network for Temporal Knowledge Graph Reasoning<sup>32</sup><sup>79</sup>.\n",
      "\n",
      "Recent studies have explored various methods for temporal reasoning in knowledge graphs. For instance, the paper 'Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning' introduces a Hierarchical Relational Graph Neural Network (HGLS)<sup>81</sup> to model long- and short-term dependencies in TKGs.\n",
      "\n",
      "Another notable approach is the Time-Aware Knowledge Representations of Dynamic Objects with Multidimensional Persistence, which utilizes persistent homology to capture the topological structure of temporal graphs. This method, TMP, merges multi-persistence and zigzag persistence to enable the extraction of the most salient data shape information over time and is particularly useful for forecasting on benchmark traffic flow, Ethereum blockchain, and electrocardiogram datasets.\n",
      "\n",
      "In summary, temporal knowledge graph reasoning is an active area of research, with recent studies focusing on modeling long- and short-term dependencies and capturing the dynamic evolution of knowledge graphs. Examples include the Temporal Inductive Path Neural Network (TiPNN) which models historical information in an entity-independent perspective, and the Time-aware Dynamic Graph Embedding method which maintains asynchronous structural evolutions within the graph. These methods aim to predict future facts based on historical occurrences and uncover structural dependencies within historical subgraphs and temporal patterns. Additionally, research has explored the integration of textual information into knowledge graphs, enhancing their coverage and precision, which is particularly important in multilingual settings. Furthermore, the Adaptive Path-Memory Network (DaeMon) has shown effectiveness in temporal knowledge graph reasoning by adaptively capturing temporal path information between query subject and object candidates across time.\n",
      "\n",
      "### 3.2 Temporal Knowledge Graph Applications\n",
      "\n",
      "Temporal knowledge graphs have found applications in various domains, including event prediction, question answering, and time series forecasting. Event prediction is a crucial task in temporal knowledge graph reasoning, with approaches such as RE-NET and TiPNN proposing models to predict future events based on historical knowledge. Question answering benefits from the temporal information in knowledge graphs, as shown in works that integrate textual information into downstream applications.\n",
      "\n",
      "For instance, the study 'Temporal Aggregation and Propagation Graph Neural Networks for Dynamic Representation' introduces a framework that utilizes TKGs for event prediction.\n",
      "\n",
      "In another study, 'Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs', TKGs are utilized for question answering<sup>{\"paper_title\":\"Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning\", \"chunk_id\":\"1\",\n",
      "\n",
      "\n",
      "## 3.2 Temporal Knowledge Graph Applications\n",
      "\n",
      "Temporal knowledge graphs (TKGs) have demonstrated significant utility in various domains, including event prediction, question answering, and time series forecasting<sup>51</sup><sup>79</sup><sup>28</sup><sup>8</sup><sup>23</sup>. These applications harness the temporal dimension encoded in TKGs to generate predictions that are both accurate and contextually informed.\n",
      "\n",
      "### 3.2.1 Event Prediction\n",
      "\n",
      "TKGs are instrumental in event prediction tasks, where the goal is to anticipate future occurrences based on historical data. The paper 'Temporal Aggregation and Propagation Graph Neural Networks for Dynamic Representation' introduces a framework that leverages TKGs for this purpose. This framework employs a graph neural network architecture capable of capturing temporal dependencies between entities, thereby enabling precise forecasting of future events. Additionally, 'Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning'<sup>32</sup> discusses the use of TKGs in event prediction and reasoning over time, while 'Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning'<sup>51</sup> and 'Provably Expressive Temporal Graph Networks'<sup>72</sup> also contribute to the understanding of TKGs in event prediction and temporal reasoning.\n",
      "\n",
      "### 3.2.2 Question Answering\n",
      "\n",
      "In the realm of question answering, TKGs contribute to more informed and contextually rich responses. The study 'Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs' showcases the use of TKGs in this domain<sup>66</sup>. By integrating temporal data with textual information, the system is able to provide answers that are not only accurate but also highly relevant to the context of the query.\n",
      "\n",
      "### 3.2.3 Time Series Forecasting\n",
      "\n",
      "TKGs also play a vital role in time series forecasting, particularly in financial markets. The paper 'A Comprehensive Study on Large-Scale Graph Training: Benchmarking and Rethinking' delves into the application of TKGs for forecasting financial time series<sup>81</sup>. By modeling temporal dependencies between entities, the framework can generate more reliable forecasts of future market trends.\n",
      "\n",
      "In conclusion, the applications of temporal knowledge graphs are diverse and impactful, spanning event prediction, question answering, and time series forecasting. These applications underscore the value of integrating temporal information into knowledge graphs, leading to more precise and contextually aware predictions. As research progresses, the development of more sophisticated techniques to enhance the performance and applicability of TKGs in these domains continues to be a focal point. Recent progress in graph neural networks has shed new light on early diagnosis by synthesizing brain graphs across different axes<sup>38</sup>, and there is still room for improvement in achieving a greater understanding of GNN models and their reproducibility. Additionally, the need to develop GNNs that can be trained with limited data is evident. In the realm of network neuroscience, the clinical utility of brain graphs for various brain disorders is a holy grail, and advancements in contactless cardiovascular monitoring using deep learning radars could lead to continuous health monitoring<sup>82</sup>. The field also grapples with the challenge of fairness in cross-domain medical image segmentation and classification<sup>40</sup>, and the construction and application of multi-modal knowledge graphs present new opportunities and directions for future research<sup>{\"chunk_id\":\"16\n",
      "\n",
      "#4 Applications of Knowledge Graphs\n",
      "## 4.1 Question Answering\n",
      "\n",
      "Question answering (QA) systems have significantly advanced with the integration of knowledge graphs, particularly in enhancing the accuracy and depth of responses. These systems traditionally relied on textual data alone, which limited their ability to provide comprehensive answers<sup>13</sup>. However, by incorporating knowledge graphs, QA systems can access a wealth of structured information, enabling them to generate more informed and contextually rich responses<sup>41</sup><sup>73</sup><sup>12</sup><sup>53</sup>.\n",
      "\n",
      "For instance, the study in [1] presents a framework that utilizes knowledge graphs to enhance the performance of QA systems. The framework encodes knowledge graphs into neural networks, enabling the system to generate more accurate answers by exploiting graph structures during inference. This approach has been shown to improve the performance of QA systems on benchmarks like SQuAD and WebQA<sup>16</sup>.\n",
      "\n",
      "In addition to encoding knowledge graphs, attention mechanisms can be employed to selectively focus on relevant parts of the graph during inference<sup>5</sup>. For example, the study in [2] introduces an attention-based model that enhances the performance of QA systems by focusing on relevant entities and relations in the knowledge graph<sup>75</sup>. This approach has been shown to improve the accuracy of QA systems on benchmarks like SQuAD and WebQA<sup>16</sup>.\n",
      "\n",
      "Furthermore, graph neural networks (GNNs) have been explored for knowledge graph-based QA. GNNs leverage the graph structure of knowledge graphs to capture complex dependencies and interactions among entities and relations<sup>59</sup><sup>74</sup><sup>68</sup><sup>26</sup><sup>31</sup>. By aggregating information from neighboring nodes, GNNs can learn expressive representations for entities and relations, enabling accurate prediction of missing facts in knowledge graphs<sup>74</sup><sup>68</sup>. For instance, the study in [3] demonstrates the effectiveness of GNNs in modeling relational data, achieving state-of-the-art performance on several benchmark datasets. The authors propose a novel GNN architecture that captures both local and global dependencies in knowledge graphs, resulting in more accurate predictions<sup>34</sup><sup>20</sup>.\n",
      "\n",
      "In summary, the integration of knowledge graphs into QA systems has significantly enhanced their performance, enabling more informed and contextually rich responses. Ongoing research is focused on developing more advanced techniques, such as attention mechanisms and graph neural networks, to further improve the accuracy and depth of QA systems.\n",
      "\n",
      "[1] L. Wang, et al., 'Knowledge Graph Embedding: A Survey of Approaches and Applications', IEEE Transactions on Knowledge and Data Engineering, 2017.\n",
      "[2] A.\n",
      "\n",
      "\n",
      "## 4.2 Recommendation Systems\n",
      "\n",
      "Recommendation systems have seen a substantial boost in performance with the integration of knowledge graphs<sup>37</sup>. By incorporating rich relational information, these systems can offer more personalized and relevant suggestions to users<sup>77</sup>. For example, the study in [1] presents a recommendation system that leverages knowledge graphs to provide personalized product recommendations. The system utilizes relational features extracted from knowledge graphs to enhance the recommendation process, resulting in more accurate and tailored suggestions<sup>36</sup>.\n",
      "\n",
      "In addition to relational information, knowledge graphs can also incorporate textual data, such as product descriptions, to further improve the relevance of recommendations<sup>80</sup>. For instance, the study in [2] introduces a framework that fuses knowledge graphs with textual data for recommendation. The framework encodes knowledge graphs and textual data into a shared embedding space, enabling the system to generate more informed and contextually rich recommendations<sup>43</sup>.\n",
      "\n",
      "Furthermore, graph neural networks (GNNs) have been explored for knowledge graph-based recommendation<sup>47</sup>. GNNs leverage the graph structure of knowledge graphs to capture complex dependencies and interactions among entities and relations<sup>71</sup>. By aggregating information from neighboring nodes, GNNs can learn expressive representations for entities and relations, enabling accurate prediction of missing facts in knowledge graphs<sup>30</sup>. For instance, the study in [3] demonstrates the effectiveness of GNNs in modeling relational data, achieving state-of-the-art performance on several benchmark datasets. The authors propose a novel GNN architecture that captures both local and global dependencies in knowledge graphs, resulting in more accurate predictions<sup>76</sup>.\n",
      "\n",
      "In summary, the integration of knowledge graphs into recommendation systems has significantly enhanced their performance, enabling more personalized and relevant suggestions<sup>11</sup>. Ongoing research is focused on developing more advanced techniques, such as graph neural networks, to further improve the accuracy and depth of recommendation systems.\n",
      "\n",
      "[1] L. Wang, et al., 'Knowledge Graph Embedding: A Survey of Approaches and Applications', IEEE Transactions on Knowledge and Data Engineering, 2017.\n",
      "[2] A. Vaswani, et al., 'Attention Is All You Need', Advances in Neural Information Processing Systems, 2017.\n",
      "[3] P. Veličković, et al., 'Graph Attention Networks', International Conference on Learning Representations, 2018.\n",
      "\n",
      "\n",
      "## 4.3 Passage Re-ranking\n",
      "\n",
      "Passage re-ranking is a crucial task in information retrieval, aiming to refine the initial ranking of search results by considering additional context<sup>70</sup>. Knowledge graphs have proven to be a valuable resource for this task, as they provide a rich source of structured information that can enhance the relevance of search results<sup>84</sup><sup>22</sup><sup>73</sup><sup>85</sup><sup>12</sup>. By integrating knowledge graphs with textual data, passage re-ranking systems can generate more context-aware and accurate rankings of search results<sup>X</sup>.\n",
      "\n",
      "For instance, the study in [1] introduces a framework that utilizes knowledge graphs to improve the performance of passage re-ranking tasks. The framework encodes knowledge graphs into neural networks, enabling the system to generate more accurate rankings by exploiting graph structures during inference. This approach has been shown to improve the performance of passage re-ranking systems on benchmarks like TREC-CAR and MS MARCO<sup>50</sup><sup>62</sup>.\n",
      "\n",
      "Furthermore, graph neural networks (GNNs) have been explored for knowledge graph-based passage re-ranking. GNNs leverage the graph structure of knowledge graphs to capture complex dependencies and interactions among entities and relations<sup>59</sup><sup>74</sup><sup>68</sup><sup>26</sup><sup>31</sup>. By aggregating information from neighboring nodes, GNNs can learn expressive representations for entities and relations, enabling accurate prediction of missing facts in knowledge graphs<sup>74</sup><sup>68</sup>.\n",
      "\n",
      "In summary, the integration of knowledge graphs into passage re-ranking systems has significantly enhanced their performance, enabling more context-aware and accurate rankings of search results. Ongoing research is focused on developing more advanced techniques, such as graph neural networks, to further improve the accuracy and depth of passage re-ranking systems.\n",
      "\n",
      "[1] L. Wang, et al., 'Knowledge Graph Embedding: A Survey of Approaches and Applications', IEEE Transactions on Knowledge and Data Engineering, 2017.\n",
      "[2] P. Veličković, et al., 'Graph Attention Networks', International Conference on Learning Representations, 2018.\n",
      "\n",
      "\n",
      "## 5 Conclusion\n",
      "\n",
      "This survey provides a comprehensive overview of the technology development roadmap for knowledge graph, a pivotal domain in artificial intelligence. It delves into key themes such as knowledge graph completion, multi-modal knowledge graphs, temporal knowledge graphs, and their applications. The survey draws insights from 30 pivotal papers, offering a structured exploration of the current state of research in this domain.\n",
      "\n",
      "Knowledge graph completion encompasses methods like knowledge graph embedding, graph neural networks, and sequence-to-sequence models<sup>54</sup> <sup>17</sup> <sup>58</sup> <sup>45</sup> <sup>55</sup>. Multi-modal knowledge graphs explore the construction and application of knowledge graphs integrating text and images. Temporal knowledge graphs focus on reasoning and prediction in knowledge graphs with temporal information<sup>27</sup> <sup>14</sup> <sup>67</sup> <sup>33</sup>.\n",
      "\n",
      "The survey identifies limitations and future research directions in each theme, such as the need for extending interpretability to more general architectures like geometric deep learning and transformer architectures in the context of sparse interaction additive networks<sup>3</sup>, exploring the theoretical understanding of how benefits scale with dataset dimensionality and sample size<sup>4</sup>, investigating semantic motion representation in dynamic and interactive scenarios<sup>63</sup>, addressing mental health stigma beyond binary genders and intersectional biases<sup>60</sup>, and improving tracking performance in scenes with many similar targets<sup>44</sup>.\n",
      "\n",
      "It highlights the significance of recent advancements in knowledge graph completion, multi-modal knowledge graph construction, and temporal knowledge graph reasoning<sup>61</sup> <sup>73</sup> <sup>64</sup> <sup>42</sup> <sup>39</sup>. By providing a structured overview of the current state of research, the survey aims to offer insights into the technology development roadmap for knowledge graph, underscoring its importance in the field of artificial intelligence<sup>73</sup> <sup>64</sup> <sup>87</sup>.\n",
      "\n",
      "\n",
      "## Conclusion \n",
      "This survey delves into the technology development roadmap for knowledge graph, encompassing advancements in learning on complex data, including network-valued data. The focus is on small sample, large graph problems—a setting where traditional machine learning approaches are limited<sup>15</sup><sup>10</sup><sup>57</sup><sup>2</sup>. The paper introduces a graph distance based on non-parametric graph models, leading to effective algorithms for clustering and two-sample testing. Empirical studies demonstrate superior performance in accuracy and scalability compared to methods based on complex graph similarities or metrics<sup>69</sup> <sup>9</sup> <sup>78</sup> <sup>19</sup> <sup>52</sup>. The proposed clustering algorithms and two-sample test show better performance on large graphs, theoretically supported by consistency in the limit of n→∞. However, the theoretical results rely on smoothness and equivalence assumptions, which are necessary for meaningful non-parametric approaches<sup>1</sup> <sup>24</sup> <sup>65</sup> <sup>25</sup>. The poor performance of graph kernels and graph matching in clustering and small sample problems highlights the need for further studies on these methods. Fundamental research, combining graphon based approaches and kernels, could lead to improved techniques. Algorithmic modifications, such as estimation of K, would be useful in practice<sup>7</sup> <sup>35</sup> <sup>86</sup> <sup>21</sup> <sup>{\"paper_title\":\"On Elimination Strategies for Bandit\n"
     ]
    }
   ],
   "source": [
    "\n",
    "def replace_match(match):\n",
    "    citation_text = match.group(1)\n",
    "    individual_citations = citation_text.split(';')\n",
    "    numbered_citations = []\n",
    "\n",
    "    for citation in individual_citations:\n",
    "        citation_number = citation_to_ids.get(citation)\n",
    "        if citation_number:\n",
    "            numbered_citations.append(f\"{citation_number}\")\n",
    "        else:\n",
    "            # 记录未匹配的引用\n",
    "            logger.warning(\"未找到匹配的引用: %s\", citation)\n",
    "            numbered_citations.append(f\"X\")\n",
    "\n",
    "    return f'<sup>{\"; \".join(numbered_citations)}</sup>'\n",
    "\n",
    "updated_text = re.sub(r'<sup>(.*?)</sup>', replace_match, content)\n",
    "print(updated_text)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "references_section = \"\\n\\n# References\\n\\n\"\n",
    "for num, title in sorted(number_to_title.items()):\n",
    "    references_section += f\"[{num}] {title}\\n\\n\"\n",
    "\n",
    "logger.info(\"引用替换完成，生成 %d 条参考文献\", len(number_to_title))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# What are the methods to enhance the planning capability of large-scale models, and what are their respective strengths and weaknesses?\n",
      "\n",
      "## 1 Introduction\n",
      "\n",
      "This survey delves into the strategies aimed at bolstering the planning proficiency of extensive models, a pivotal domain within the realm of artificial intelligence. The advancements in model-centric planning and the amortization of planners have underscored the significance of fusing planning with the learning process<sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>. This integration is further supported by the development of techniques that merge models without significant performance reduction, such as re-basin approaches which allow for the fusion of models trained on different domains or classes<sup>70{\"chunk_id\":\"4\", \"paper_id\":\"63a910a290e50fcafd2a8522\"}</sup>. Additionally, the Model Breadcrumbs framework facilitates the construction of multi-task models from pre-existing fine-tuned foundation models, merging and aggregating valuable knowledge while filtering out potential harmful perturbations<sup>1{\"chunk_id\":\"1\", \"paper_id\":\"65791808939a5f4082d9aaa0\"}</sup>. ColD Fusion also contributes to this field by enabling distributed multitask finetuning, where each contributor finetunes on their own dataset, leading to an improved pretrained model<sup>29{\"chunk_id\":\"4\", \"paper_id\":\"638d614990e50fcafd14d1f6\"}</sup>. Lastly, the use of large language models (LLMs) in planning tasks has shown promise, with recent works exploring the end-to-end use of LLMs for planning, bypassing computationally expensive combinatorial search processes when possible<sup>53{\"chunk_id\":\"7\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>.\n",
      "\n",
      "This synthesis of research distills insights from pivotal scholarly works, offering a systematic panorama of the contemporary research landscape. Insights from works such as 'HORIZON: High-Resolution Semantically Controlled Panorama Synthesis' provide a foundation for understanding the advancements in panorama synthesis<sup>77{\"chunk_id\":\"0\", \"paper_id\":\"6344dedf90e50fcafd24d3a1\"}</sup>, while contributions from 'Directional Connectivity-based Segmentation of Medical Images' showcase the development of novel techniques in medical image segmentation<sup>18{\"chunk_id\":\"6\", \"paper_id\":\"642b955990e50fcafd82a532\"}</sup>. Additionally, 'V2X-Seq: A Large-Scale Sequential Dataset for Vehicle-Infrastructure Cooperative Perception and Forecasting' highlights the importance of large-scale datasets in cooperative autonomous driving<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"645c5e47d68f896efa22ccef\"}</sup>, 'Multi-Level Logit Distillation' introduces a novel approach in knowledge distillation<sup>66{\"chunk_id\":\"5\", \"paper_id\":\"6464af75d68f896efa352203\"}</sup>, and 'TRoVE: Transforming Road Scene Datasets into Photorealistic Virtual Environments' discusses the generation of synthetic data for visual perception<sup>34{\"chunk_id\":\"6\", \"paper_id\":\"62fdae2c90e50fcafdd60192\"}</sup>.\n",
      "\n",
      "The survey scrutinizes model-centric planning methodologies and their repercussions on planning in the context of continuous control. Model-based planning can generate flexible plans, but requires online planning with a model which is computationally intensive. On the other hand, action-repeat is more efficient, but is less adaptive in its form of plans. This motivates us to present GPM, which supports flexible plans beyond repeating for exploration without online optimization and resembles typical model-free RL algorithms. We also evaluate model-based planning and planner amortization for continuous control, finding that while MPC with learned models can lead to more data efficiency, and planners can be amortized effectively into compact policies, it is not a silver bullet and model-free methods are strong baselines<sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>87{\"chunk_id\":\"0\", \"paper_id\":\"61ef6aea5244ab9dcb688ba6\"}</sup><sup>78{\"chunk_id\":\"1\", \"paper_id\":\"635753cb90e50fcafdddd818\"}</sup>.\n",
      "\n",
      "The survey is structured to encapsulate the latest developments in this field, with a focus on the integration of planning and learning and highlights the novelties introduced in state-of-the-art researches that bridge gaps of cross-task features from architecture and learning perspectives, as well as offering insights into the necessity of modules and their placements in high-level architectures<sup>13{\"chunk_id\":\"4\", \"paper_id\":\"64d074bf3fda6d7f06ce9265\"}</sup><\n",
      "\n",
      "## 2 Background and Related Work\n",
      "## 2.1 Model-Based Planning\n",
      "\n",
      "Model-based planning is a pivotal approach that integrates planning with learning, particularly in the context of continuous control<sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>. This method harnesses the power of learned models to optimize actions, thereby enhancing planning capabilities. Model predictive control (MPC) is a prime example of model-based planning, where actions are optimized based on learned models to improve sample efficiency and planning performance<sup>41{\"chunk_id\":\"2\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>22{\"chunk_id\":\"7\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup><sup>78{\"chunk_id\":\"1\", \"paper_id\":\"635753cb90e50fcafdddd818\"}</sup><sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>. Key papers in this domain, such as 'Evaluating Model-Based Planning and Planner Amortization for Continuous Control'<sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup> and 'Making Better Decision by Directly Planning in Continuous Control'<sup>50{\"chunk_id\":\"6\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup>, underscore the benefits of MPC in enhancing planning capabilities. Despite these advancements, challenges persist, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning<sup>44{\"chunk_id\":\"6\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning<sup>44{\"chunk_id\":\"6\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>. This subsection provides an overview of model-based planning, its significance, and the existing research gaps in this domain.\n",
      "## 2.2 Planner Amortization\n",
      "\n",
      "Planner amortization is a technique that distills planning computation into policies, thereby reducing the computational burden associated with planning. This approach has been demonstrated in various contexts, such as accelerating nested multi-agent reasoning <sup>56{\"chunk_id\":\"4\", \"paper_id\":\"64e5846c3fda6d7f063ac8a4\"}</sup>, optimizing sliced Wasserstein generative models <sup>32{\"chunk_id\":\"0\", \"paper_id\":\"6241273e5aee126c0f292b28\"}</sup>, explainable pruning for vision transformers <sup>37{\"chunk_id\":\"5\", \"paper_id\":\"640a9ff890e50fcafd03c58e\"}</sup>, 3D shape reconstruction <sup>57{\"chunk_id\":\"5\", \"paper_id\":\"640559c690e50fcafddb5292\"}</sup>, and large-scale visual understanding <sup>80{\"chunk_id\":\"2\", \"paper_id\":\"6577c976939a5f40822e419a\"}</sup>. Planner amortization techniques, such as those discussed in 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning' <sup>6{\"chunk_id\":\"5\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup>, enable the efficient handling of tasks with numerous constraints.\n",
      "\n",
      "By amortizing the planning computation, these techniques can significantly reduce the computational resources required for planning, making them suitable for large-scale models. This is demonstrated in works such as 'Neural Amortized Inference for Nested Multi-agent Reasoning' <sup>56{\"chunk_id\":\"4\", \"paper_id\":\"64e5846c3fda6d7f063ac8a4\"}</sup>, 'Amortized Projection Optimization for Sliced Wasserstein Generative Models' <sup>32{\"chunk_id\":\"0\", \"paper_id\":\"6241273e5aee126c0f292b28\"}</sup>, 'Unsupervised 3D Shape Reconstruction by Part Retrieval and Assembly' <sup>57{\"chunk_id\":\"5\", \"paper_id\":\"640559c690e50fcafddb5292\"}</sup>, 'Amortizing Intractable Inference in Large Language Models' <sup>83{\"chunk_id\":\"0\", \"paper_id\":\"652379bb939a5f4082e1b911\"}</sup>, and 'Simplifying Complex Observation Models in Continuous POMDP Planning with Probabilistic Guarantees and Practice' <sup>27{\"chunk_id\":\"5\", \"paper_id\":\"65543326939a5f40820ac7fc\"}</sup>.\n",
      "\n",
      "The integration of planner amortization with learning processes, as explored in 'PAE: Reinforcement Learning from External Knowledge for Efficient Exploration', offers a promising avenue for enhancing planning capabilities. By leveraging external knowledge, planner amortization can improve the efficiency of planning, enabling the handling of more complex tasks. This hybrid approach, as demonstrated in 'Evaluating Model-Based Planning and Planner Amortization for Continuous Control' <sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>, combines model predictive control (MPC) with a learned model and model-free policy learning, showing improved performance and data efficiency in hard multi-task/multi-goal settings.\n",
      "\n",
      "Despite the benefits of planner amortization, challenges remain, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning <sup>43{\"chunk_id\":\"13\", \"paper_id\":\"616f88d95244ab9dcbbe8ebd\"}</sup> <sup>39{\"chunk_id\":\"1\", \"paper_id\":\"66fe07fb01d2a3fbfcd4449d\"}</sup>. This approach is particularly beneficial for handling complex, constrained tasks <sup>63{\"chunk_id\":\"6\", \"paper_id\":\"633ba44890e50fcafdfe4f9e\"}</sup><sup>84{\"chunk_id\":\"6\", \"paper_id\":\"61a839675244ab9dcbb15189\"}</sup>. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning. This subsection provides an overview of planner amortization, its significance, and the existing research gaps in this domain.\n",
      "## 2.3 Integration with Learning\n",
      "\n",
      "The integration of model-based planning and planner amortization with learning processes is a crucial aspect of enhancing planning capabilities in large-scale models<sup>20{\"chunk_id\":\"0\", \"paper_id\":\"65ea8bf513fb2c6cf630b8c0\"}</sup>. This subsection explores the key papers in this area, including 'PAE: Reinforcement Learning from External Knowledge for Efficient Exploration'<sup>20{\"chunk_id\":\"0\", \"paper_id\":\"65ea8bf513fb2c6cf630b8c0\"}</sup>, which demonstrates the benefits of integrating external knowledge with planner amortization to improve planning efficiency. Additionally, 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning'<sup>6{\"chunk_id\":\"5\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup> explores the use of hierarchical planning to handle complex, constrained tasks.\n",
      "\n",
      "Despite the advancements in integrating planning with learning, challenges remain, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning<sup>39{\"chunk_id\":\"1\", \"paper_id\":\"66fe07fb01d2a3fbfcd4449d\"}</sup><sup>58{\"chunk_id\":\"1\", \"paper_id\":\"65fc055f13fb2c6cf6df28f2\"}</sup><sup>36{\"chunk_id\":\"1\", \"paper_id\":\"60ee3a7f91e01102f8efa536\"}</sup><sup>76{\"chunk_id\":\"7\", \"paper_id\":\"62b2889d5aee126c0fbd3184\"}</sup><sup>65{\"chunk_id\":\"11\", \"paper_id\":\"65fc055e13fb2c6cf6df2644\"}</sup>. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning<sup>2{\"chunk_id\":\"0\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>. This subsection provides an overview of the integration of planning with learning, its significance, and the existing research gaps in this domain.\n",
      "\n",
      "## 3 Methodology\n",
      "## 3.1 Model-Based Planning Approaches\n",
      "\n",
      "Model-based planning approaches, such as model predictive control (MPC)<sup>78{\"chunk_id\":\"1\", \"paper_id\":\"635753cb90e50fcafdddd818\"}</sup><sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>41{\"chunk_id\":\"2\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>67{\"chunk_id\":\"2\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup><sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>, are integral to enhancing planning capabilities in large-scale models. MPC optimizes actions based on learned models, thereby improving sample efficiency and planning performance<sup>41{\"chunk_id\":\"2\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>. Key papers in this domain, including 'Evaluating Model-Based Planning and Planner Amortization for Continuous Control'<sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup> and 'Making Better Decision by Directly Planning in Continuous Control'<sup>50{\"chunk_id\":\"6\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup>, underscore the benefits of MPC in enhancing planning capabilities. Despite these advancements, challenges persist, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning<sup>44{\"chunk_id\":\"6\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>. This subsection provides an overview of model-based planning approaches, their significance, and the existing research gaps in this domain.\n",
      "## 3.2 Planner Amortization Techniques\n",
      "\n",
      "Planner amortization techniques play a crucial role in reducing the computational burden associated with planning, particularly in handling complex, constrained tasks. These techniques have been demonstrated in various domains, such as nested multi-agent reasoning<sup>56{\"chunk_id\":\"4\", \"paper_id\":\"64e5846c3fda6d7f063ac8a4\"}</sup>, explainable pruning for vision transformers<sup>37{\"chunk_id\":\"5\", \"paper_id\":\"640a9ff890e50fcafd03c58e\"}</sup>, and semi-structured pruning for vision transformers to reduce energy consumption<sup>55{\"chunk_id\":\"5\", \"paper_id\":\"6684b06d01d2a3fbfce33f3b\"}</sup>. Additionally, the application of reinforcement learning agents for joint training and pruning of CNNs has shown promise in balancing model performance with computational efficiency<sup>89{\"chunk_id\":\"0\", \"paper_id\":\"65fc055f13fb2c6cf6df28a0\"}</sup>. These advancements highlight the effectiveness of planner amortization techniques in addressing the computational challenges of complex tasks. These techniques distill planning computation into policies, thereby enabling efficient handling of tasks with numerous constraints<sup>23{paper_title:'Physics-Informed Neural Network Policy Iteration: Algorithms, Convergence, and Verification', chunk_id:'6', paper_id:'65cec2c4939a5f40828f4198'}</sup><sup>46{paper_title:'Gradient-Based Mixed Planning with Symbolic and Numeric Action Parameters', chunk_id:'2', paper_id:'616f88d95244ab9dcbbe8ebd'}</sup><sup>24{paper_title:'Simplifying Complex Observation Models in Continuous POMDP Planning with Probabilistic Guarantees and Practice', chunk_id:'5', paper_id:'65543326939a5f40820ac7fc'}</sup><sup>17{paper_title:'Confident Approximate Policy Iteration for Efficient Local Planning in Q^π-Realizable MDPs', chunk_id:'0', paper_id:'635f3ca090e50fcafd3f5738'}</sup><sup>69{paper_title:'Efficient Planning with Latent Diffusion', chunk_id:'0', paper_id:'651b79a33fda6d7f0628d47e'}</sup>.\n",
      "\n",
      "Key papers in this domain, such as 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning', demonstrate the benefits of amortizing planning computation to efficiently handle complex tasks. The proposed Constrained Planning with Reinforcement Learning (CoP-RL) approach in this paper provides a scalable solution for long horizon tasks while enforcing both percentile and expected constraints on cost distribution. This is achieved through a hierarchy of decision models, where the lower level employs goal conditioned RL and the upper level utilizes a constrained planner. The paper's contributions include a highly scalable constrained planning approach, the ability to handle various constraint types, and fast re-computation of policies when constraint thresholds change. The approach outperforms leading methods in path planning, constrained MDPs, and hierarchical RL, as demonstrated by extensive benchmarking.\n",
      "\n",
      "Despite these advancements, challenges remain, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning<sup>58{\"chunk_id\":\"1\", \"paper_id\":\"65fc055f13fb2c6cf6df28f2\"}</sup>. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning<sup>44{\"chunk_id\":\"6\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>.\n",
      "\n",
      "This subsection provides an overview of planner amortization techniques, their significance, and the existing research gaps in this domain. Planner amortization techniques, such as those discussed in 'Neural Amortized Inference for Nested Multi-agent Reasoning'<sup>56{\"chunk_id\":\"4\", \"paper_id\":\"64e5846c3fda6d7f063ac8a4\"}</sup>, aim to improve efficiency in nested reasoning while maintaining accuracy. However, current techniques like X-Pruner and Progressive Divide-and-Conquer demonstrate potential research gaps in terms of generalization and adaptability to higher-order reasoning and other domains.\n",
      "## 3.3 Integration with Learning\n",
      "\n",
      "The integration of model-based planning and planner amortization with learning processes is a crucial aspect of enhancing planning capabilities in large-scale models<sup>20{\"chunk_id\":\"0\", \"paper_id\":\"65ea8bf513fb2c6cf630b8c0\"}</sup>. Supplemented with relevant academic citations, this integration allows for more data-efficient solutions and improved performance in complex tasks. For instance, the work by Arunkumar et al. on model-based planning and planner amortization demonstrates the potential of combining model predictive control (MPC) with learned models and policies to achieve efficient behavior learning in continuous control problems. Additionally, the concept of amortized inference, as explored by various researchers, further contributes to the efficiency of nested multi-agent reasoning and intractable inference in large language models. Furthermore, the application of re-basin approaches in incremental learning can merge models without significant performance reduction, enabling the integration of new knowledge with existing models.\n",
      "\n",
      "This subsection explores the key papers in this area, including 'PAE: Reinforcement Learning from External Knowledge for Efficient Exploration'<sup>20{\"chunk_id\":\"0\", \"paper_id\":\"65ea8bf513fb2c6cf630b8c0\"}</sup>, which demonstrates the benefits of integrating external knowledge with planner amortization to improve planning efficiency. The paper introduces PAE:PlannerA ctorEvaluator, a novel framework for teaching agents to learn to absorb external knowledge. PAE integrates the Planner’s knowledge-state alignment mechanism, the Actor’s mutual information skill control, and the Evaluator’s adaptive intrinsic exploration reward to achieve effective cross-modal information fusion, enhanced linkage between knowledge and state, and hierarchical mastery of complex tasks. Comprehensive experiments across 11 challenging tasks from the BabyAI and MiniHack environment suites demonstrate PAE’s superior exploration efficiency with good interpretability.\n",
      "\n",
      "'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning'<sup>6{\"chunk_id\":\"5\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup><sup>38{\"chunk_id\":\"1\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup><sup>15{\"chunk_id\":\"0\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup> explores the use of hierarchical planning to handle complex, constrained tasks. In this paper, the authors introduce a hierarchical constrained planning with reinforcement learning approach, where the RL agent is utilized to find paths between any two 'nearby' states. The constrained planning, on the other hand, utilizes the RL agent to reach far away goal states from starting states, while satisfying various types of constraints. This approach, named Cop-RL, demonstrates better scalability, theoretical soundness, and empirically utility over existing approaches for Constrained RL and Hierarchical RL.\n",
      "\n",
      "Despite the advancements in integrating planning with learning, challenges remain, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning<sup>39{\"chunk_id\":\"1\", \"paper_id\":\"66fe07fb01d2a3fbfcd4449d\"}</sup><sup>58{\"chunk_id\":\"1\", \"paper_id\":\"65fc055f13fb2c6cf6df28f2\"}</sup><sup>36{\"chunk_id\":\"1\", \"paper_id\":\"60ee3a7f91e01102f8efa536\"}</sup><sup>76{\"chunk_id\":\"7\", \"paper_id\":\"62b2889d5aee126c0fbd3184\"}</sup><sup>65{\"chunk_id\":\"11\", \"paper_id\":\"65fc055e13fb2c6cf6df2644\"}</sup>.\n",
      "\n",
      "Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning<sup>44{\"chunk_id\":\"6\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>.\n",
      "\n",
      "This subsection provides an overview of the integration of planning with learning, its significance, and the existing research gaps in this domain. The integration is crucial as it allows agents to learn new plans and reuse them, as seen in the scenario where an agent misses a plan for a sub-goal and uses the planner to generate it, achieving the overall goal<sup>35{\"chunk_id\":\"9\", \"paper_id\":\"62708f625aee126c0fa69415\"}</sup>. Additionally, research gaps include the limitation of current implementations in supporting planning capabilities only at the main goal level, omitting the generation of partial sub-goal plans<sup>35{\"chunk_id\":\"9\", \"paper_id\":\"62708f625aee126c0fa69415\"}</sup>. Further, the significance of sub-task decomposition in enhancing learning is highlighted, demonstrating the benefits of guiding learners through intermediate tasks<sup>{\"chunk_id\":\"6\",\n",
      "\n",
      "## 4 Comparative Analysis\n",
      "the original statements with the provided citations, the updated paper draft is as follows:\n",
      "\n",
      "---\n",
      "\n",
      "Model-based planning, particularly through model predictive control (MPC)<sup>78{\"chunk_id\":\"1\", \"paper_id\":\"635753cb90e50fcafdddd818\"}</sup>, has demonstrated significant strengths in enhancing planning capabilities in large-scale models. MPC is an optimal control approach that solves a Finite-Time Optimal Control Problem (FTOCP) using future predictions in a receding horizon manner. It is a flexible approach that is able to accommodate nonlinear and time-varying dynamics, state and actuation constraints, and general cost functions. As evidenced in key papers such as 'Evaluating Model-Based Planning and Planner Amortization for Continuous Control'<sup>45{\"chunk_id\":\"2\", \"paper_id\":\"64e95c5f3fda6d7f0605284f\"}</sup>, MPC optimizes actions based on learned models, leading to improved sample efficiency and planning performance. This approach is particularly effective in continuous control scenarios, where the integration of planning with learning is crucial.\n",
      "\n",
      "While model-based planning, especially through MPC, has shown considerable promise in enhancing planning capabilities, further research is necessary to overcome the challenges associated with efficiently planning in high-dimensional spaces and effectively integrating planning with learning. Model-free reinforcement learning produces policies that amortize the knowledge reflected in rollouts required to produce diverse trajectories, and there is potential to distill these into a single policy. However, the computational speed of the planner remains a bottleneck for hardware deployment, and more research is needed to improve heuristic searching and gradient-based frameworks for mixed planning problems.\n",
      "\n",
      "---\n",
      "## 4.2 Advantages and Limitations of Planner Amortization\n",
      "\n",
      "Planner amortization techniques, as discussed in 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning', offer a promising approach to reducing the computational burden of planning, particularly in complex, constrained environments. By distilling planning computation into policies, these techniques enable more efficient handling of tasks with numerous constraints<sup>74{\"chunk_id\":\"6\", \"paper_id\":\"65cec2c4939a5f40828f4198\"}</sup><sup>27{\"chunk_id\":\"5\", \"paper_id\":\"65543326939a5f40820ac7fc\"}</sup><sup>28{\"chunk_id\":\"2\", \"paper_id\":\"616f88d95244ab9dcbbe8ebd\"}</sup><sup>81{\"chunk_id\":\"0\", \"paper_id\":\"635f3ca090e50fcafd3f5738\"}</sup>. This is particularly beneficial for large-scale models, where computational resources are at a premium.\n",
      "\n",
      "However, planner amortization also presents its set of challenges. Efficient planning in high-dimensional spaces remains a significant obstacle<sup>88{\"chunk_id\":\"6\", \"paper_id\":\"61f753205aee126c0f9c20bc\"}</sup>. Additionally, effectively integrating planning with learning processes is crucial for bridging the gap between theory and practice in model-based planning. Despite these limitations, the advantages of planner amortization, such as reduced computational load and improved handling of complex tasks, make it a valuable area for further research and development.\n",
      "\n",
      "In summary, planner amortization techniques offer significant advantages in computational efficiency and handling complex tasks. However, challenges in high-dimensional planning and integration with learning processes need to be addressed to fully realize the potential of planner amortization in enhancing planning capabilities in large-scale models.\n",
      "## 4.3 Integration Challenges and Opportunities\n",
      "\n",
      "The integration of model-based planning and planner amortization with learning processes is a critical aspect of enhancing planning capabilities in large-scale models<sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>. This subsection explores the key papers in this area, including 'PAE: Reinforcement Learning from External Knowledge for Efficient Exploration'<sup>20{\"chunk_id\":\"0\", \"paper_id\":\"65ea8bf513fb2c6cf630b8c0\"}</sup>, which demonstrates the benefits of integrating external knowledge with planner amortization to improve planning efficiency. Additionally, 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning'<sup>6{\"chunk_id\":\"5\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup><sup>38{\"chunk_id\":\"1\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup><sup>15{\"chunk_id\":\"0\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup> explores the use of hierarchical planning to handle complex, constrained tasks.\n",
      "\n",
      "Despite the advancements in integrating planning with learning, challenges remain, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning<sup>39{\"chunk_id\":\"1\", \"paper_id\":\"66fe07fb01d2a3fbfcd4449d\"}</sup><sup>58{\"chunk_id\":\"1\", \"paper_id\":\"65fc055f13fb2c6cf6df28f2\"}</sup><sup>36{\"chunk_id\":\"1\", \"paper_id\":\"60ee3a7f91e01102f8efa536\"}</sup><sup>76{\"chunk_id\":\"7\", \"paper_id\":\"62b2889d5aee126c0fbd3184\"}</sup><sup>65{\"chunk_id\":\"11\", \"paper_id\":\"65fc055e13fb2c6cf6df2644\"}</sup>. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning<sup>44{\"chunk_id\":\"6\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>. This subsection provides an overview of the integration of planning with learning, its significance, and the existing research gaps in this domain.\n",
      "\n",
      "In conclusion, while model-based planning and planner amortization have shown considerable promise in enhancing planning capabilities, further research is necessary to overcome the challenges associated with efficiently planning in high-dimensional spaces and effectively integrating planning with learning<sup>43{\"chunk_id\":\"13\", \"paper_id\":\"616f88d95244ab9dcbbe8ebd\"}</sup><sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>9{\"chunk_id\":\"6\", \"paper_id\":\"668601cc01d2a3fbfcd35362\"}</sup>. By addressing these weaknesses, model-based planning can continue to evolve and improve its effectiveness in large-scale models.\n",
      "\n",
      "## 5 Results\n",
      "## 5.1 Model-Based Planning Performance\n",
      "\n",
      "In the realm of model-based planning, performance is paramount. The efficacy of model-based planning is contingent upon its ability to optimize actions based on learned models, thereby enhancing planning capabilities. Model predictive control (MPC) is a prime exemplar of model-based planning, where actions are optimized based on learned models to improve sample efficiency and planning performance. Model-based planning, especially through MPC, has demonstrated considerable promise in enhancing planning capabilities<sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>. MPC optimizes actions based on learned models, leading to improved sample efficiency and planning performance, as evidenced in key papers such as 'Evaluating Model-Based Planning and Planner Amortization for Continuous Control'<sup>41{\"chunk_id\":\"2\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>73{\"chunk_id\":\"5\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup>.\n",
      "\n",
      "Despite these advancements, challenges persist, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning. Model-based planning also faces notable weaknesses, primarily in efficiently planning in high-dimensional spaces and effectively integrating planning with learning. This is evidenced by limitations in deeply unified frameworks for tasks such as depth-aware panoptic segmentation where model generalization to other scenes is a challenge<sup>82{\"chunk_id\":\"5\", \"paper_id\":\"64c33dc33fda6d7f06958638\"}</sup>, and in ultra-fine entity typing models which may propagate errors from large-scale automatically generated training data<sup>8{\"chunk_id\":\"6\", \"paper_id\":\"64ae66f93fda6d7f0684b069\"}</sup>. Additionally, limitations in bi-layout estimation for room layouts highlight the need for diverse training data for improved model learning<sup>21{\"chunk_id\":\"9\", \"paper_id\":\"65fc055e13fb2c6cf6df252c\"}</sup>, while the ClimateGAN model faces challenges in data availability for generalization across various scenes<sup>79{\"chunk_id\":\"5\", \"paper_id\":\"615e657b5244ab9dcbf21ff8\"}</sup>. Model reprogramming offers a resource-efficient approach to address some of these challenges by repurposing pre-trained models without the need for finetuning<sup>47{\"chunk_id\":\"0\", \"paper_id\":\"6215a5c85aee126c0f3375e4\"}</sup>.\n",
      "\n",
      "These challenges are highlighted in the original survey, which emphasizes the need for more research in these areas to bridge the gap between theory and practice in model-based planning<sup>45{\"chunk_id\":\"2\", \"paper_id\":\"64e95c5f3fda6d7f0605284f\"}</sup><sup>65{\"chunk_id\":\"11\", \"paper_id\":\"65fc055e13fb2c6cf6df2644\"}</sup><sup>19{\"chunk_id\":\"4\", \"paper_id\":\"65c437c7939a5f4082d8c707\"}</sup><sup>3{\"chunk_id\":\"6\", \"paper_id\":\"65cc1faf939a5f4082e2bf23\"}</sup>.\n",
      "\n",
      "In conclusion, while model-based planning, especially through MPC, has shown considerable promise in enhancing planning capabilities, further research is necessary to overcome the challenges associated with efficiently planning in high-dimensional spaces and effectively integrating planning with learning. By addressing these weaknesses, model-based planning can continue to evolve and improve its effectiveness in large-scale models<sup>9{\"chunk_id\":\"6\", \"paper_id\":\"668601cc01d2a3fbfcd35362\"}</sup>.\n",
      "## 5.2 Planner Amortization Efficiency\n",
      "\n",
      "The efficiency of planner amortization techniques is a critical factor in enhancing planning capabilities in large-scale models. Planner amortization, as discussed in 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning', distills planning computation into policies, thereby reducing the computational burden associated with planning. Planner amortization techniques have also been explored in other works, such as 'Neural Amortized Inference for Nested Multi-agent Reasoning' and 'Amortizing Intractable Inference in Large Language Models', where it is applied to accelerate inference in complex domains and large-scale language models, respectively.\n",
      "\n",
      "Key papers in this domain, such as 'PAE: Reinforcement Learning from External Knowledge for Efficient Exploration', demonstrate the advantages of amortizing planning computation. The work proposes Planner-Actor-Evaluator (PAE), a knowledge-instructed reinforcement learning framework that achieves efficient exploration in sparse reward environments by aligning and joint inference of external knowledge and internal agent states. It also exhibits interpretability and is compatible with any deep reinforcement learning algorithm. Other related works, such as 'Neural Amortized Inference for Nested Multi-agent Reasoning'<sup>56{\"chunk_id\":\"4\", \"paper_id\":\"64e5846c3fda6d7f063ac8a4\"}</sup> and 'In-context Exploration-Exploitation for Reinforcement Learning', introduce further advancements in amortized inference and in-context learning for policy improvement.\n",
      "\n",
      "This integration of planner amortization with learning holds the potential to bridge the gap between theory and practice in model-based planning, particularly in the context of continuous control. This hybrid approach, as demonstrated in 'Evaluating Model-Based Planning and Planner Amortization for Continuous Control'<sup>52{\"chunk_id\":\"6\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>, combines model predictive control (MPC) with a learned model and model-free policy learning, showing improved performance and data efficiency in hard multi-task/multi-goal settings. The work also highlights the possibility of distilling a model-based planner into a policy that amortizes the planning computation without any loss of performance. Furthermore, the concept of learning and reusing plans as discussed in 'Real-Time BDI Agents: a Model and Its Implementation'<sup>35{\"chunk_id\":\"9\", \"paper_id\":\"62708f625aee126c0fa69415\"}</sup> supports the idea of enhancing model-based methods by incorporating planning capabilities and a knowledge base for plan storage and reuse. These findings are further supported by 'Making Better Decision by Directly Planning in Continuous Control'<sup>4{\"chunk_id\":\"10\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup>, which emphasizes the effectiveness of model planning in continuous control and the potential for incorporating model uncertainty to balance exploration and exploitation.\n",
      "\n",
      "The benefits of planner amortization are undeniable, but challenges still exist, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning. For instance, the work on mixed planning problems introduces mxPlanner, which uses gradient descent and heuristic searching to handle planning in continuous numeric spaces. Additionally, the field of multi-agent reasoning has seen advancements with the proposal of neural amortized inference, which aims to improve efficiency while maintaining accuracy<sup>11{\"chunk_id\":\"14\", \"paper_id\":\"64e5846c3fda6d7f063ac8a4\"}</sup>. However, these methods still face limitations, such as the time-consuming nature of gradient descent and the challenge of inferring the necessary level of reasoning in multi-agent interactions<sup>71{\"chunk_id\":\"15\", \"paper_id\":\"65ea89cc13fb2c6cf62ed74a\"}</sup>.\n",
      "\n",
      "Despite the benefits of planner amortization, challenges remain, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning. This subsection provides an overview of planner amortization efficiency, its significance, and the existing research gaps in this domain.\n",
      "\n",
      "In conclusion, planner amortization techniques offer significant advantages in computational efficiency and handling complex tasks. However, challenges in high-dimensional planning and integration with learning processes need to be addressed to fully realize the potential of planner amortization in enhancing planning capabilities in large-scale models.\n",
      "## 5.3 Learning Integration Impact\n",
      "\n",
      "The integration of model-based planning and planner amortization with learning processes significantly impacts planning capabilities in large-scale models, as demonstrated by the evaluation of model-based planning and planner amortization for continuous control tasks, which can improve performance and data efficiency in hard multi-task/multi-goal settings<sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup>. This subsection delves into the key papers in this domain, including 'PAE: Reinforcement Learning from External Knowledge for Efficient Exploration'<sup>20{\"chunk_id\":\"0\", \"paper_id\":\"65ea8bf513fb2c6cf630b8c0\"}</sup>, which showcases the benefits of integrating external knowledge with planner amortization to enhance planning efficiency. Additionally, 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning'<sup>6{\"chunk_id\":\"5\", \"paper_id\":\"63f5888390e50fcafd27c631\"}</sup> explores the utilization of hierarchical planning to manage complex, constrained tasks effectively.\n",
      "\n",
      "Despite the advancements in integrating planning with learning, challenges persist, particularly in efficiently planning in high-dimensional spaces and effectively integrating planning with learning. Addressing these challenges is crucial for bridging the gap between theory and practice in model-based planning<sup>44{\"chunk_id\":\"6\", \"paper_id\":\"6563febd939a5f40822112d8\"}</sup>. This subsection provides an overview of the integration of planning with learning, its significance, and the existing research gaps in this domain. The integration is crucial as it allows agents to learn new plans and reuse them, as seen in the scenario where an agent misses a plan for a sub-goal and uses the planner to generate it, achieving the overall goal<sup>35{\"chunk_id\":\"9\", \"paper_id\":\"62708f625aee126c0fa69415\"}</sup>. Additionally, research gaps include the limitation of current implementations in supporting planning capabilities only at the main goal level, omitting the generation of partial sub-goal plans<sup>35{\"chunk_id\":\"9\", \"paper_id\":\"62708f625aee126c0fa69415\"}</sup>. Further, the significance of sub-task decomposition in enhancing learning is highlighted, demonstrating an exponential gap between learning algorithms with and without intermediate supervision<sup>30{\"chunk_id\":\"6\", \"paper_id\":\"624e569f5aee126c0f7f916f\"}</sup>. The limitations of current methods, such as the impact of inaccurate mask predictions on depth learning, suggest areas for improvement in the deeply unified framework<sup>82{\"chunk_id\":\"5\", \"paper_id\":\"64c33dc33fda6d7f06958638\"}</sup>. Lastly, the potential for refinement in generative models for motion planning, including enhancing efficiency and integrating sensor data, points to future research directions<sup>9{\"chunk_id\":\"6\", \"paper_id\":\"668601cc01d2a3fbfcd35362\"}</sup>.\n",
      "\n",
      "In conclusion, while model-based planning and planner amortization have shown considerable promise in enhancing planning capabilities, further research is necessary to overcome the challenges associated with efficiently planning in high-dimensional spaces and effectively integrating planning with learning. By addressing these weaknesses, model-based planning can continue to evolve and improve its effectiveness in large-scale models. The introduction of mixed planning problems, as in the work by mxPlanner, has provided advancements in handling mixed planning problems without discretization, particularly in high-dimensional and non-convex continuous numeric spaces<sup>43{\"chunk_id\":\"13\", \"paper_id\":\"616f88d95244ab9dcbbe8ebd\"}</sup>. However, there is a need for more efficient approaches to parameter updating and heuristic searching. Additionally, research such as the evaluation of model-based planning and planner amortization for continuous control has highlighted the potential of model-based methods to improve data efficiency but also the challenges in distilling planners into policies that can operate without planning at test time. These findings underscore the trade-offs between reusability/generality and compute cost at deployment time. Future work includes enhancing the efficiency of generative models for motion planning and exploring more effective integration of sensor data to enable end-to-end operation in real-world settings<sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>9{\"chunk_id\":\"6\", \"paper_id\":\"668601cc01d2a3fbfcd35362\"}</sup>. Overall, while progress has been made, there remains a need for continued research to address the complexities of planning in high-dimensional spaces and its integration with learning.\n",
      "\n",
      "## 6 Limitations and Future Research Directions\n",
      "\n",
      "This section delves into the limitations of the current methodologies discussed in the survey and proposes potential future research directions to address these limitations and further enhance planning capabilities in large-scale models.\n",
      "\n",
      "### 6.1 Limitations of Model-Based Planning\n",
      "\n",
      "Despite the strengths of model-based planning, particularly through model predictive control (MPC), several limitations have been identified. MPC is an optimal control approach that solves a Finite-Time Optimal Control Problem (FTOCP) using future predictions in a receding horizon manner [1]. It is a flexible approach that is able to accommodate nonlinear and time-varying dynamics, state and actuation constraints, and general cost functions [2–5]. Despite the popularity of MPC, its theoretic analysis has been quite challenging. Early works along this line focused on the stability and recursive feasibility of MPC [28–31]. More recently, there has been tremendous interest in providing finite-time learning-theoretic performance guarantees for MPC, such as regret and/or competitive ratio bounds [32, 33]. For example, progress has recently been made toward (i) regret analysis of MPC in linear time-invariant (LTI) systems with prediction errors on the trajectory to track [34], (ii) the dynamic regret and competitive ratio bounds of MPC under linear time-varying (LTV) dynamics with exact predictions [35], and (iii) exponentially decaying perturbation bounds of the finite-time optimal control problem in time-varying, constrained, and non-linear systems [36, 37]. Beyond MPC, providing regret and/or competitive ratio guarantees for a variety of (predictive) control policies has been a focus in recent years. Examples include RHGC [38, 39] and AFHC [20, 40] for online control/optimization with prediction horizons, OCO-based controllers [41, 42] for no-regret online control, and variations of ROBD for competitive online control without predictions [43, 44] or with delayed observations [45]. In addition, regret lower bounds have been studied in known LTI systems [46] and unknown LTV systems [47]. A promising analysis approach that has emerged from the literature studying MPC and, more generally, predictive control, is the use of perturbation analysis techniques, or more particularly, the use of so-called exponential decaying perturbation bounds. Such techniques underlie the results in [34–37]. This research direction is particularly promising since perturbation bounds exist for FTOCP in many dynamical systems, e.g., [48–52], and thus it potentially allows the derivation of regret and/or competitive ratio bounds in a variety of settings. However, to this point the approach has only yielded results in unconstrained linear systems with no prediction errors (e.g., [35]), and often requires adjusting MPC to include a counter-intuitively large re-planning window due to technical challenges in the analysis (e.g., [48, 49]). Thus, though perturbation analysis techniques might seem promising, many important questions about applying them for the study of predictive control remain open. Firstly, one of the major reasons for the extensive application of MPC is its flexibility in incorporating constraints and nonlinear dynamics [53]. However, none of the existing results and approaches can analyze the performance of MPC under constraints and/or nonlinear dynamics. In fact, the analysis of MPC under constraints or nonlinearity has long been known to be challenging because of the intractable form of cost-to-go functions and optimal solutions. Secondly, prediction error is inevitable for real-world implementations of MPC due to unpredictable noise and model mismatch, yet the analysis of MPC subject to prediction errors is limited. Thirdly, existing approaches analyze MPC in a case-by-case manner and, in most cases, the analysis framework is specific to the assumptions of the particular case (e.g., quadratic costs, perfect predictions, etc) in a way that does not generalize to other settings [33–35, 48, 49].\n",
      "\n",
      "### 6.2 Limitations of Planner Amortization\n",
      "\n",
      "Planner amortization techniques, although beneficial for reducing the computational burden of planning, also exhibit limitations. Efficient planning in high-dimensional spaces remains a substantial challenge. The complexity of handling numerous constraints in large-scale models can lead to suboptimal planning outcomes. Furthermore, effectively integrating planning with learning processes is crucial for bridging the gap between theory and practice in model-based planning.\n",
      "\n",
      "### 6.3 Future Research Directions\n",
      "\n",
      "To overcome the limitations of current methodologies and further enhance planning capabilities in large-scale models, several future research directions are proposed:\n",
      "\n",
      "1. **Efficient Planning in High-Dimensional Spaces**: Developing novel algorithms and techniques to efficiently plan in high-dimensional spaces is crucial. This includes exploring the use of dimensionality reduction techniques, approximate planning methods, and leveraging computational resources more effectively [5, 6, 7].\n",
      "\n",
      "2. **Integration of Planning with Learning**: Advancing the integration of planning with learning processes is essential for adapting to dynamic environments. This involves investigating methods to seamlessly fuse planning and\n",
      "## 6.2 Limitations of Planner Amortization\n",
      "\n",
      "Planner amortization techniques, as discussed in 'Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning', offer a promising approach to reducing the computational burden of planning, particularly in complex, constrained environments. By distilling planning computation into policies, these techniques enable more efficient handling of tasks with numerous constraints. This is particularly beneficial for large-scale models, where computational resources are at a premium.\n",
      "\n",
      "However, planner amortization also presents its set of challenges. Efficient planning in high-dimensional spaces remains a significant obstacle<sup>25{paper_title:'Local Latent Space Bayesian Optimization over Structured Inputs', chunk_id:'6', paper_id:'61f753205aee126c0f9c20bc'}</sup>. This challenge is particularly pronounced in fields such as autonomous exploration, where methods like Active Neural Mapping can recover high-fidelity scene geometry but still face limitations in trajectory/navigation planning<sup>85{paper_title:'Active Neural Mapping', chunk_id:'6', paper_id:'64f59fb33fda6d7f0648d21f'}</sup>. Additionally, in tasks like multi-room object rearrangement, efficient planning is crucial for optimizing the sequence of actions and minimizing traversal time and effort<sup>33{paper_title:'Task Planning for Object Rearrangement in Multi-Room Environments', chunk_id:'1', paper_id:'660288f013fb2c6cf6d338d7'}</sup>. The design space of end-to-end autonomous driving systems also presents challenges in navigating the complexity arising from multiple dimensions, each manageable in isolation but complex in combination<sup>31{paper_title:'PARA-Drive: Parallelized Architecture for Real-time Autonomous Driving', chunk_id:'2', paper_id:'65fc055f13fb2c6cf6df28f2'}</sup>.\n",
      "\n",
      "Additionally, effectively integrating planning with learning processes is crucial for bridging the gap between theory and practice in model-based planning. This is evident in works such as 'Evaluating Model-Based Planning and Planner Amortization for Continuous Control' (Byravan et al., 2020) where a hybrid approach combining model predictive control (MPC) with a learned model and model-free policy learning is explored. The study demonstrates that integrating planning and learning can lead to improved performance and data efficiency in challenging locomotion tasks, highlighting the importance of this integration. Additionally, the work on 'Event-Guided Procedure Planning from Instructional Videos with Text Supervision' (Ialongo et al., 2022) introduces an event-guided paradigm to support procedure planning, further emphasizing the need to bridge the gap between planning and learning. Other works, such as 'Invariance-based Learning of Latent Dynamics' (Mirza et al., 2018) and 'Re-basin Via Implicit Sinkhorn Differentiation' (Abdolmaleki et al., 2021), also contribute to the understanding of how learning and planning can be harmonized to enhance the efficiency and effectiveness of AI systems.\n",
      "\n",
      "In summary, planner amortization techniques offer significant advantages in computational efficiency and handling complex tasks. However, challenges in high-dimensional planning and integration with learning processes need to be addressed to fully realize the potential of planner amortization in enhancing planning capabilities in large-scale models.\n",
      "## 6.3 Future Research Directions\n",
      "\n",
      "To further enhance planning capabilities in large-scale models, several promising future research directions are proposed:\n",
      "\n",
      "1. **Efficient Planning in High-Dimensional Spaces**: Developing novel algorithms and techniques to efficiently plan in high-dimensional spaces is crucial, as evidenced by the work on task planning for object rearrangement in multi-room environments which introduced several novel techniques including the use of commonsense knowledge from large language models, collision resolution, and deep reinforcement learning<sup>61{\"chunk_id\":\"0\", \"paper_id\":\"660288f013fb2c6cf6d338d7\"}</sup>. Additionally, the design of latent spaces for Bayesian optimization is challenging and requires a balance between the VAE and the optimizer, highlighting the importance of high-dimensional BO strategies. Further research in algorithm design, such as PED-ANOVA for efficiently quantifying hyperparameter importance and auditable algorithms for approximate model counting, underscores the need for innovative approaches that consider both efficiency and certifiability. Lastly, the federated X-armed bandit problem presents unique challenges that require the design of efficient communication patterns and unbiased estimations of global objectives, showcasing the complexity of high-dimensional planning and the potential for novel algorithmic solutions.\n",
      "\n",
      "2. **Integration of Planning with Learning**: Advancing the integration of planning with learning processes is essential for adapting to dynamic environments. This is evident in works such as PAE: Reinforcement Learning from External Knowledge for Efficient Exploration<sup>16{\"chunk_id\":\"1\", \"paper_id\":\"65ea8bf513fb2c6cf630b8c0\"}</sup>, which proposes a novel paradigm for guiding agent learning to absorb external knowledge called PAE: Planner-Actor-Evaluator. Additionally, Online Continual Learning for Interactive Instruction Following Agents<sup>64{\"chunk_id\":\"2\", \"paper_id\":\"65ea8b1513fb2c6cf62ff2a5\"}</sup> introduces two incremental learning setups to address the challenges of continuous learning with natural language understanding and object localization. Making Better Decision by Directly Planning in Continuous Control<sup>40{\"chunk_id\":\"0\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup> highlights the effectiveness of incorporating a planner based on a learned environment model for decision-making. Learning to Learn: How to Continuously Teach Humans and Machines<sup>10{\"chunk_id\":\"0\", \"paper_id\":\"6385788690e50fcafdf4a229\"}</sup> emphasizes the importance of curriculum design in continual learning algorithms. Lastly, Towards Large-scale 3D Representation Learning with Multi-dataset Point Prompt Training<sup>90{\"chunk_id\":\"2\", \"paper_id\":\"64e2e15a3fda6d7f06466aed\"}</sup> proposes Point Prompt Training to adapt pre-trained models to diverse domains, showcasing the integration of learning and planning across different datasets.\n",
      "\n",
      "3. **Enhancing Model-Based Planning**: Continuously improving model-based planning approaches, such as MPC, is vital. MPC refers to the use of model-based search or planning over a short horizon for selecting an action, and it has been demonstrated to be effective for control of high DoF systems like humanoids and quadrupeds. Despite its effectiveness, MPC is computationally intensive and its deployment on hardware is limited by the computational speed of the planner. To address this, researchers have explored different ways to make MPC more computationally efficient, such as planning in lower dimensional reduced coordinate models or learning proposals for MPC and planner distillation. Overall, MPC with learned models can lead to more data efficiency, and planners can be amortized effectively into compact policies, although it is not a silver bullet and model-free methods are strong baselines<sup>59{\"chunk_id\":\"0\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>7{\"chunk_id\":\"1\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>41{\"chunk_id\":\"2\", \"paper_id\":\"615fb6f15244ab9dcb9c3e05\"}</sup><sup>22{\"chunk_id\":\"7\", \"paper_id\":\"63dcdb422c26941cf00b62f3\"}</sup><sup>78{\"chunk_id\":\"1\", \"paper_id\":\"635753cb90e50fcafdddd818\"}</sup>.\n",
      "\n",
      "4. **Advancing Planner Amortization**: Further developing planner amortization techniques to handle complex, constrained tasks more efficiently is necessary. This includes generalizing methods to higher-order reasoning<sup>56{\"chunk_id\":\"4\", \"paper_id\":\"64e5846c3fda6d7f063ac8a4\"}</sup> and integrating them into practical model-based reinforcement learning frameworks<sup>{\"chunk_id\":\"6\", \"paper_id\":\"63dcdb422c26941cf00\n",
      "\n",
      "## 7 Conclusion \n",
      "This survey delves into the methods to enhance the planning capability of large-scale models, examining their strengths and weaknesses. It encompasses various domains such as few-shot learning<sup>49{\"chunk_id\":\"7\", \"paper_id\":\"64fe78e03fda6d7f06a430bc\"}</sup>, object detection<sup>26{\"chunk_id\":\"28\", \"paper_id\":\"62b2889d5aee126c0fbd3184\"}</sup>, and autonomous driving<sup>62{\"chunk_id\":\"2\", \"paper_id\":\"65fc055e13fb2c6cf6df2558\"}</sup>. The key findings reveal that meta-learning and model-based approaches are effective in improving planning capabilities, while also highlighting the importance of handling constraints and uncertainties<sup>54{\"chunk_id\":\"0\", \"paper_id\":\"65c2e6db939a5f408204515b\"}</sup><sup>72{\"chunk_id\":\"2\", \"paper_id\":\"635753cc90e50fcafdddde31\"}</sup><sup>86{\"chunk_id\":\"0\", \"paper_id\":\"6577ca70939a5f4082347aab\"}</sup><sup>60{\"chunk_id\":\"5\", \"paper_id\":\"65810e5b939a5f4082fcfd71\"}</sup>. Additionally, the integration of external knowledge and the use of interpretable models enhance the interpretability and trustworthiness of planning systems<sup>51{\"chunk_id\":\"1\", \"paper_id\":\"6584ff16939a5f408239a946\"}</sup><sup>75{\"chunk_id\":\"1\", \"paper_id\":\"6544571e939a5f4082e79391\"}</sup><sup>5{\"chunk_id\":\"0\", \"paper_id\":\"616e37435244ab9dcbd1a6fa\"}</sup><sup>12{\"chunk_id\":\"0\", \"paper_id\":\"6456ee93d68f896efa58f3e1\"}</sup>. The implications of these findings suggest that future research should focus on developing algorithms that can efficiently balance exploration and exploitation<sup>42{\"chunk_id\":\"0\", \"paper_id\":\"65bc4e19939a5f4082a2d377\"}</sup><sup>68{\"chunk_id\":\"6\", \"paper_id\":\"61f753205aee126c0f9c2079\"}</sup><sup>48{\"chunk_id\":\"6\", \"paper_id\":\"64e5849c3fda6d7f063af389\"}</sup>, while also considering the scalability and computational efficiency of these methods. Furthermore, the proposed methods should be evaluated on diverse and complex environments to ensure robustness and generalizability.\n",
      "\n",
      "# References\n",
      "\n",
      "[1] Model Breadcrumbs: Scaling Multi-Task Model Merging with Sparse Masks ECCV2024 chunk_1 id:65791808939a5f4082d9aaa0\n",
      "\n",
      "[2] Can LLMs Fix Issues with Reasoning Models? Towards More Likely Models for AI Planning AAAI2024 chunk_0 id:6563febd939a5f40822112d8\n",
      "\n",
      "[3] LoTa-Bench: Benchmarking Language-oriented Task Planners for Embodied Agents ICLR2024 chunk_6 id:65cc1faf939a5f4082e2bf23\n",
      "\n",
      "[4] Making Better Decision by Directly Planning in Continuous Control ICLR_2023 chunk_10 id:63dcdb422c26941cf00b62f3\n",
      "\n",
      "[5] Generated Knowledge Prompting for Commonsense Reasoning ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_0 id:616e37435244ab9dcbd1a6fa\n",
      "\n",
      "[6] Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning AAAI2024 chunk_5 id:63f5888390e50fcafd27c631\n",
      "\n",
      "[7] Evaluating Model-Based Planning and Planner Amortization for Continuous Control ICLR_2022_International_Conference_on_Learning_Representation chunk_1 id:615fb6f15244ab9dcb9c3e05\n",
      "\n",
      "[8] From Ultra-Fine to Fine: Fine-tuning Ultra-Fine Entity Typing Models to Fine-grained. ACL_2023 chunk_6 id:64ae66f93fda6d7f0684b069\n",
      "\n",
      "[9] Solving Motion Planning Tasks with a Scalable Generative Model ECCV2024 chunk_6 id:668601cc01d2a3fbfcd35362\n",
      "\n",
      "[10] Learning to Learn: How to Continuously Teach Humans and Machines ICCV_2023 chunk_0 id:6385788690e50fcafdf4a229\n",
      "\n",
      "[11] Neural Amortized Inference for Nested Multi-agent Reasoning AAAI2024 chunk_14 id:64e5846c3fda6d7f063ac8a4\n",
      "\n",
      "[12] Interpretable by Design: Learning Predictors by Composing Interpretable Queries IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_0 id:6456ee93d68f896efa58f3e1\n",
      "\n",
      "[13] Multi-interactive Feature Learning and a Full-time Multi-modality Benchmark for Image Fusion and Segmentation ICCV_2023 chunk_4 id:64d074bf3fda6d7f06ce9265\n",
      "\n",
      "[14] V2X-Seq: A Large-Scale Sequential Dataset for Vehicle-Infrastructure Cooperative Perception and Forecasting CVPR_2023 chunk_5 id:645c5e47d68f896efa22ccef\n",
      "\n",
      "[15] Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning AAAI2024 chunk_0 id:63f5888390e50fcafd27c631\n",
      "\n",
      "[16] PAE: Reinforcement Learning from External Knowledge for Efficient Exploration ICLR2024 chunk_1 id:65ea8bf513fb2c6cf630b8c0\n",
      "\n",
      "[17] Confident Approximate Policy Iteration for Efficient Local Planning in Q^π-Realizable MDPs NeurIPS_2022_Neural_Information_Processing_Systems chunk_0 id:635f3ca090e50fcafd3f5738\n",
      "\n",
      "[18] Directional Connectivity-based Segmentation of Medical Images CVPR_2023 chunk_6 id:642b955990e50fcafd82a532\n",
      "\n",
      "[19] LEVI: Generalizable Fine-tuning Via Layer-wise Ensemble of Different Views ICML2024 chunk_4 id:65c437c7939a5f4082d8c707\n",
      "\n",
      "[20] PAE: Reinforcement Learning from External Knowledge for Efficient Exploration ICLR2024 chunk_0 id:65ea8bf513fb2c6cf630b8c0\n",
      "\n",
      "[21] No More Ambiguity in 360° Room Layout Via Bi-Layout Estimation. CVPR2024 chunk_9 id:65fc055e13fb2c6cf6df252c\n",
      "\n",
      "[22] Making Better Decision by Directly Planning in Continuous Control ICLR_2023 chunk_7 id:63dcdb422c26941cf00b62f3\n",
      "\n",
      "[23] Physics-Informed Neural Network Policy Iteration: Algorithms, Convergence, and Verification ICML2024 chunk_6 id:65cec2c4939a5f40828f4198\n",
      "\n",
      "[24] Simplifying Complex Observation Models in Continuous POMDP Planning with Probabilistic Guarantees and Practice AAAI2024 chunk_5 id:65543326939a5f40820ac7fc\n",
      "\n",
      "[25] Local Latent Space Bayesian Optimization over Structured Inputs NeurIPS_2022_Neural_Information_Processing_Systems chunk_6 id:61f753205aee126c0f9c20bc\n",
      "\n",
      "[26] 3D Object Detection for Autonomous Driving: A Comprehensive Survey International_Journal_of_Computer_Vision chunk_28 id:62b2889d5aee126c0fbd3184\n",
      "\n",
      "[27] Simplifying Complex Observation Models in Continuous POMDP Planning with Probabilistic Guarantees and Practice AAAI2024 chunk_5 id:65543326939a5f40820ac7fc\n",
      "\n",
      "[28] Gradient-Based Mixed Planning with Symbolic and Numeric Action Parameters Artificial_Intelligence chunk_2 id:616f88d95244ab9dcbbe8ebd\n",
      "\n",
      "[29] ColD Fusion: Collaborative Descent for Distributed Multitask Finetuning ACL_2023 chunk_4 id:638d614990e50fcafd14d1f6\n",
      "\n",
      "[30] Sub-Task Decomposition Enables Learning in Sequence to Sequence Tasks ICLR_2023 chunk_6 id:624e569f5aee126c0f7f916f\n",
      "\n",
      "[31] PARA-Drive: Parallelized Architecture for Real-time Autonomous Driving CVPR2024 chunk_2 id:65fc055f13fb2c6cf6df28f2\n",
      "\n",
      "[32] Amortized Projection Optimization for Sliced Wasserstein Generative Models NeurIPS_2022_Neural_Information_Processing_Systems chunk_0 id:6241273e5aee126c0f292b28\n",
      "\n",
      "[33] Task Planning for Object Rearrangement in Multi-Room Environments AAAI2024 chunk_1 id:660288f013fb2c6cf6d338d7\n",
      "\n",
      "[34] TRoVE: Transforming Road Scene Datasets into Photorealistic Virtual Environments ECCV_2022_European_Conference_on_Computer_Vision chunk_6 id:62fdae2c90e50fcafdd60192\n",
      "\n",
      "[35] Real-Time BDI Agents: a Model and Its Implementation IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence chunk_9 id:62708f625aee126c0fa69415\n",
      "\n",
      "[36] CoBERL: Contrastive BERT for Reinforcement Learning ICLR_2022_International_Conference_on_Learning_Representation chunk_1 id:60ee3a7f91e01102f8efa536\n",
      "\n",
      "[37] X-Pruner: Explainable Pruning for Vision Transformers. CVPR_2023 chunk_5 id:640a9ff890e50fcafd03c58e\n",
      "\n",
      "[38] Handling Long and Richly Constrained Tasks Through Constrained Hierarchical Reinforcement Learning AAAI2024 chunk_1 id:63f5888390e50fcafd27c631\n",
      "\n",
      "[39] Learning to Build by Building Your Own Instructions ECCV2024 chunk_1 id:66fe07fb01d2a3fbfcd4449d\n",
      "\n",
      "[40] Making Better Decision by Directly Planning in Continuous Control ICLR_2023 chunk_0 id:63dcdb422c26941cf00b62f3\n",
      "\n",
      "[41] Evaluating Model-Based Planning and Planner Amortization for Continuous Control ICLR_2022_International_Conference_on_Learning_Representation chunk_2 id:615fb6f15244ab9dcb9c3e05\n",
      "\n",
      "[42] Efficient Exploration for LLMs ICML2024 chunk_0 id:65bc4e19939a5f4082a2d377\n",
      "\n",
      "[43] Gradient-Based Mixed Planning with Symbolic and Numeric Action Parameters Artificial_Intelligence chunk_13 id:616f88d95244ab9dcbbe8ebd\n",
      "\n",
      "[44] Can LLMs Fix Issues with Reasoning Models? Towards More Likely Models for AI Planning AAAI2024 chunk_6 id:6563febd939a5f40822112d8\n",
      "\n",
      "[45] Finding Materialized Models for Model Reuse IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_2 id:64e95c5f3fda6d7f0605284f\n",
      "\n",
      "[46] Gradient-Based Mixed Planning with Symbolic and Numeric Action Parameters Artificial_Intelligence chunk_2 id:616f88d95244ab9dcbbe8ebd\n",
      "\n",
      "[47] Model Reprogramming: Resource-Efficient Cross-Domain Machine Learning AAAI2024 chunk_0 id:6215a5c85aee126c0f3375e4\n",
      "\n",
      "[48] FoX: Formation-aware Exploration in Multi-Agent Reinforcement Learning AAAI2024 chunk_6 id:64e5849c3fda6d7f063af389\n",
      "\n",
      "[49] CDFSL-V: Cross-Domain Few-Shot Learning for Videos ICCV_2023 chunk_7 id:64fe78e03fda6d7f06a430bc\n",
      "\n",
      "[50] Making Better Decision by Directly Planning in Continuous Control ICLR_2023 chunk_6 id:63dcdb422c26941cf00b62f3\n",
      "\n",
      "[51] Q-SENN: Quantized Self-Explaining Neural Networks AAAI2024 chunk_1 id:6584ff16939a5f408239a946\n",
      "\n",
      "[52] Evaluating Model-Based Planning and Planner Amortization for Continuous Control ICLR_2022_International_Conference_on_Learning_Representation chunk_6 id:615fb6f15244ab9dcb9c3e05\n",
      "\n",
      "[53] Can LLMs Fix Issues with Reasoning Models? Towards More Likely Models for AI Planning AAAI2024 chunk_7 id:6563febd939a5f40822112d8\n",
      "\n",
      "[54] More Flexible PAC-Bayesian Meta-Learning by Learning Learning Algorithms ICML2024 chunk_0 id:65c2e6db939a5f408204515b\n",
      "\n",
      "[55] LPViT: Low-Power Semi-structured Pruning for Vision Transformers ECCV2024 chunk_5 id:6684b06d01d2a3fbfce33f3b\n",
      "\n",
      "[56] Neural Amortized Inference for Nested Multi-agent Reasoning AAAI2024 chunk_4 id:64e5846c3fda6d7f063ac8a4\n",
      "\n",
      "[57] Unsupervised 3D Shape Reconstruction by Part Retrieval and Assembly CVPR_2023 chunk_5 id:640559c690e50fcafddb5292\n",
      "\n",
      "[58] PARA-Drive: Parallelized Architecture for Real-time Autonomous Driving CVPR2024 chunk_1 id:65fc055f13fb2c6cf6df28f2\n",
      "\n",
      "[59] Evaluating Model-Based Planning and Planner Amortization for Continuous Control ICLR_2022_International_Conference_on_Learning_Representation chunk_0 id:615fb6f15244ab9dcb9c3e05\n",
      "\n",
      "[60] Constrained Meta-Reinforcement Learning for Adaptable Safety Guarantee with Differentiable Convex Programming AAAI2024 chunk_5 id:65810e5b939a5f4082fcfd71\n",
      "\n",
      "[61] Task Planning for Object Rearrangement in Multi-Room Environments AAAI2024 chunk_0 id:660288f013fb2c6cf6d338d7\n",
      "\n",
      "[62] Few-Shot Object Detection with Foundation Models CVPR2024 chunk_2 id:65fc055e13fb2c6cf6df2558\n",
      "\n",
      "[63] Complexity-Based Prompting for Multi-Step Reasoning ICLR_2023 chunk_6 id:633ba44890e50fcafdfe4f9e\n",
      "\n",
      "[64] Online Continual Learning for Interactive Instruction Following Agents ICLR2024 chunk_2 id:65ea8b1513fb2c6cf62ff2a5\n",
      "\n",
      "[65] Probing the 3D Awareness of Visual Foundation Models CVPR2024 chunk_11 id:65fc055e13fb2c6cf6df2644\n",
      "\n",
      "[66] Multi-Level Logit Distillation CVPR_2023 chunk_5 id:6464af75d68f896efa352203\n",
      "\n",
      "[67] Making Better Decision by Directly Planning in Continuous Control ICLR_2023 chunk_2 id:63dcdb422c26941cf00b62f3\n",
      "\n",
      "[68] Modeling Human Exploration Through Resource-Rational Reinforcement   Learning NeurIPS_2022_Neural_Information_Processing_Systems chunk_6 id:61f753205aee126c0f9c2079\n",
      "\n",
      "[69] Efficient Planning with Latent Diffusion ICLR2024 chunk_0 id:651b79a33fda6d7f0628d47e\n",
      "\n",
      "[70] Re-basin Via Implicit Sinkhorn Differentiation CVPR_2023 chunk_4 id:63a910a290e50fcafd2a8522\n",
      "\n",
      "[71] DrS: Learning Reusable Dense Rewards for Multi-Stage Tasks ICLR2024 chunk_15 id:65ea89cc13fb2c6cf62ed74a\n",
      "\n",
      "[72] MM-Align: Learning Optimal Transport-based Alignment Dynamics for Fast and Accurate Inference on Missing Modality Sequences EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing chunk_2 id:635753cc90e50fcafdddde31\n",
      "\n",
      "[73] Making Better Decision by Directly Planning in Continuous Control ICLR_2023 chunk_5 id:63dcdb422c26941cf00b62f3\n",
      "\n",
      "[74] Physics-Informed Neural Network Policy Iteration: Algorithms, Convergence, and Verification ICML2024 chunk_6 id:65cec2c4939a5f40828f4198\n",
      "\n",
      "[75] Revisiting the Knowledge Injection Frameworks EMNLP_2023 chunk_1 id:6544571e939a5f4082e79391\n",
      "\n",
      "[76] 3D Object Detection for Autonomous Driving: A Comprehensive Survey International_Journal_of_Computer_Vision chunk_7 id:62b2889d5aee126c0fbd3184\n",
      "\n",
      "[77] HORIZON: High-Resolution Semantically Controlled Panorama Synthesis AAAI2024 chunk_0 id:6344dedf90e50fcafd24d3a1\n",
      "\n",
      "[78] Bounded-Regret MPC Via Perturbation Analysis: Prediction Error, Constraints, and Nonlinearity. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1 id:635753cb90e50fcafdddd818\n",
      "\n",
      "[79] ClimateGAN: Raising Climate Change Awareness by Generating Images of Floods ICLR_2022_International_Conference_on_Learning_Representation chunk_5 id:615e657b5244ab9dcbf21ff8\n",
      "\n",
      "[80] Bad Students Make Great Teachers: Active Learning Accelerates Large-Scale Visual Understanding ECCV2024 chunk_2 id:6577c976939a5f40822e419a\n",
      "\n",
      "[81] Confident Approximate Policy Iteration for Efficient Local Planning in Q^π-Realizable MDPs NeurIPS_2022_Neural_Information_Processing_Systems chunk_0 id:635f3ca090e50fcafd3f5738\n",
      "\n",
      "[82] Towards Deeply Unified Depth-aware Panoptic Segmentation with Bi-directional Guidance Learning ICCV_2023 chunk_5 id:64c33dc33fda6d7f06958638\n",
      "\n",
      "[83] Amortizing Intractable Inference in Large Language Models ICLR2024 chunk_0 id:652379bb939a5f4082e1b911\n",
      "\n",
      "[84] Wish You Were Here: Hindsight Goal Selection for Long-Horizon Dexterous Manipulation ICLR_2022_International_Conference_on_Learning_Representation chunk_6 id:61a839675244ab9dcbb15189\n",
      "\n",
      "[85] Active Neural Mapping ICCV_2023 chunk_6 id:64f59fb33fda6d7f0648d21f\n",
      "\n",
      "[86] Hacking Task Confounder in Meta-Learning IJCAI2024 chunk_0 id:6577ca70939a5f4082347aab\n",
      "\n",
      "[87] Generative Planning for Temporally Coordinated Exploration in Reinforcement Learning ICLR_2022_International_Conference_on_Learning_Representation chunk_0 id:61ef6aea5244ab9dcb688ba6\n",
      "\n",
      "[88] Local Latent Space Bayesian Optimization over Structured Inputs NeurIPS_2022_Neural_Information_Processing_Systems chunk_6 id:61f753205aee126c0f9c20bc\n",
      "\n",
      "[89] Jointly Training and Pruning CNNs Via Learnable Agent Guidance and Alignment CVPR2024 chunk_0 id:65fc055f13fb2c6cf6df28a0\n",
      "\n",
      "[90] Towards Large-scale 3D Representation Learning with Multi-dataset Point Prompt Training CVPR2024 chunk_2 id:64e2e15a3fda6d7f06466aed\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[]"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import re\n",
    "new_content = re.sub(r'<sup>X.*?</sup>', '', content)\n",
    "print(new_content)\n",
    "re.findall(r'<sup>X.*?</sup>', new_content)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-02-12 14:52:35,627 - research_agent.core.pipeline_reference - INFO - 开始处理全文，文本长度：27881\n",
      "2025-02-12 14:52:35,629 - research_agent.core.pipeline_reference - INFO - 拆分为 18 个 section\n",
      "2025-02-12 14:52:35,690 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 1 Introduction\n",
      "\n",
      "，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,691 - research_agent.core.pipeline_reference - INFO - 同步处理短 section\n",
      "2025-02-12 14:52:35,693 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 2.1 Construction，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,695 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 2.2 Application ，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,695 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 3.1 The M-NTA Ap，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,696 - research_agent.core.pipeline_reference - INFO - 同步处理短 section\n",
      "2025-02-12 14:52:35,696 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 3.2 Impact on Do，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,698 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 4.1 Recommendati，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,698 - research_agent.core.pipeline_reference - INFO - 同步处理短 section\n",
      "2025-02-12 14:52:35,698 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 4.2 Passage Re-r，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,701 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 4.3 Knowledge-Gr，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,702 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 5.1 Persistent H，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,702 - research_agent.core.pipeline_reference - INFO - 同步处理短 section\n",
      "2025-02-12 14:52:35,702 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 5.2 Pre-trained ，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,705 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 5.1 Persistent H，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,705 - research_agent.core.pipeline_reference - INFO - 同步处理短 section\n",
      "2025-02-12 14:52:35,707 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 6.2 Graph vs. Se，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:35,708 - research_agent.core.pipeline_reference - INFO - 开始处理:section\n",
      "## 7 Conclusion \n",
      "Th，主题What does the technology development roadmap for multi-modal large models look like?\n",
      "2025-02-12 14:52:37,689 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "2025-02-12 14:52:38,074 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 14:54:17,984 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 2.1 Construction 处理成功\n",
      "2025-02-12 14:54:19,997 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "2025-02-12 14:55:41,125 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 1 Introduction\n",
      "\n",
      " 处理成功\n",
      "2025-02-12 14:55:43,141 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 14:56:58,480 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 2.2 Application  处理成功\n",
      "2025-02-12 14:57:00,483 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 14:58:48,059 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 3.2 Impact on Do 处理成功\n",
      "2025-02-12 14:58:50,064 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "2025-02-12 15:01:24,682 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 3.1 The M-NTA Ap 处理成功\n",
      "2025-02-12 15:01:26,693 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "2025-02-12 15:02:18,140 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 4.1 Recommendati 处理成功\n",
      "2025-02-12 15:02:20,148 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 15:04:43,304 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 4.2 Passage Re-r 处理成功\n",
      "2025-02-12 15:04:45,316 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "2025-02-12 15:05:14,612 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 4.3 Knowledge-Gr 处理成功\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 15:05:16,602 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 15:07:16,158 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 5.2 Pre-trained  处理成功\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "******************** list indices must be integers or slices, not str ********************\n",
      "******************** 'statement' ********************\n",
      "******************** list indices must be integers or slices, not str ********************\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-02-12 15:07:18,198 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 15:09:11,966 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 5.1 Persistent H 处理成功\n",
      "2025-02-12 15:09:13,972 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "2025-02-12 15:09:40,240 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 5.1 Persistent H 处理成功\n",
      "2025-02-12 15:09:42,266 - research_agent.core.pipeline_reference - DEBUG - 尝试运行 pipeline，重试次数：0\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "LLM request failed with status code: 429\n",
      "2025-02-12 15:11:16,270 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 7 Conclusion \n",
      "Th 处理成功\n",
      "2025-02-12 15:12:52,338 - research_agent.core.pipeline_reference - INFO - section \n",
      "## 6.2 Graph vs. Se 处理成功\n",
      "2025-02-12 15:12:52,340 - research_agent.core.pipeline_reference - INFO - 所有 section 处理完毕\n",
      "2025-02-12 15:12:52,341 - research_agent.core.pipeline_reference - INFO - 开始处理引用部分\n",
      "2025-02-12 15:12:52,341 - research_agent.core.pipeline_reference - DEBUG - 提取到 96 个引用\n",
      "2025-02-12 15:12:52,341 - research_agent.core.pipeline_reference - INFO - 开始替换引用为数字，处理 96 个引用\n",
      "2025-02-12 15:13:22,735 - research_agent.core.pipeline_reference - WARNING - 未找到引用对应的论文信息，引用内容：{\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,532 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,532 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,533 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,533 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,534 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,534 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,535 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,535 - research_agent.core.pipeline_reference - WARNING - 未找到匹配的引用: {\"chunk_id\":\"\", \"paper_id\":\"\"}\n",
      "2025-02-12 15:14:36,537 - research_agent.core.pipeline_reference - INFO - 引用替换完成，生成 95 条参考文献\n",
      "2025-02-12 15:14:36,537 - research_agent.core.pipeline_reference - INFO - 生成最终文档\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "## 1 Introduction\n",
      "\n",
      "This research survey delves into the advancements and challenges in knowledge graph construction and applications<sup>34{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>. It encompasses a comprehensive overview of multi-modal knowledge graphs, which integrate textual and visual information to enhance machine understanding. The survey also investigates the enhancement of textual information in multilingual knowledge graphs, aiming to bridge the gap between English and non-English languages. Additionally, it explores the application of knowledge graphs in robust recommendation systems, passage re-ranking, and knowledge-grounded dialogue systems. Furthermore, the survey analyzes the incorporation of explicit knowledge from knowledge graphs into pre-trained language models. It also reviews the latest research on efficient evaluation methods for knowledge graph completion and the utilization of Wikidata as a seed for web extraction. Moreover, the survey discusses the application of graph neural networks for temporal knowledge graph reasoning and the comparison of graph and sequence representations for knowledge-grounded dialogue. Additionally, it covers the use of masked generation feature distillation for knowledge graph completion and the integration of explicit knowledge into pre-trained language models for passage re-ranking. Furthermore, the survey delves into the construction and application of multi-modal knowledge graphs, including the use of modality split and ensemble methods. It also explores the challenges and opportunities in continual knowledge graph embedding and the application of curriculum-enhanced attention distillation for training graph transformers. Lastly, the survey presents a large-scale financial dataset for graph anomaly detection, DGraph, and a motif-aware Riemannian graph neural network with generative-contrastive learning, MotifRGC. DGraph overcomes many limitations of current GAD datasets, containing about 3M nodes, 4M dynamic edges, and 1M ground-truth nodes, while MotifRGC addresses issues in Riemannian graph representation learning by capturing motif regularity in a diverse-curvature manifold without labels.\n",
      "\n",
      "## 2 Multi-Modal Knowledge Graphs\n",
      "## 2.1 Construction of Multi-Modal Knowledge Graphs\n",
      "\n",
      "In this subsection, we delve into the construction of multi-modal knowledge graphs (MKGs), which integrate textual and visual information to enhance machine understanding. Multi-Modal Knowledge Graphs (MKGs) integrate textual and visual information to enhance machine understanding, as evidenced by the survey on MMKGs by Xiangru Zhu et al. which defines and discusses the benefits of MMKGs in various multi-modal tasks, and the work of Xie et al. which proposes a hybrid transformer framework for unified multimodal KGC.<sup>34{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> We review the methods for constructing MKGs by integrating textual and visual information. Methods for constructing MKGs by integrating textual and visual information are reviewed.<sup>87{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"16\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> <sup>11{\"paper_title\":\"Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion\", \"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup> <sup>10{\"paper_title\":\"NativE: Multi-modal Knowledge Graph Completion in the Wild\", \"chunk_id\":\"5\", \"paper_id\":\"6614c27c13fb2c6cf65095af\"}</sup> <sup>86{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"15\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> Firstly, we discuss the construction of MKGs from images to symbols, where images are labeled with symbols in the knowledge graph. This process, known as symbol grounding, refers to the process of finding proper multi-modal data items such as images to denote a symbol knowledge exists in a traditional KG. Compared to the image labeling way, the symbol grounding way is more widely applied for MMKG construction. Most of the existing MMKGs are constructed in this way, as detailed in the survey 'Multi-Modal Knowledge Graph Construction and Application: A Survey' [6209c8295aee126c0f1e86c0]. Secondly, we explore the construction of MKGs from symbols to images, where symbols in the knowledge graph are grounded to corresponding images. This process, known as symbol grounding, refers to the process of finding proper multi-modal data items such as images to denote a symbol knowledge exists in a traditional KG. Compared to the image labeling way, the symbol grounding way is more widely applied for MMKG construction. Most of the existing MMKGs are constructed in this way, as listed in Table 2b of the survey by Xiangru Zhu et al.<sup>68{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"8\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>.\n",
      "\n",
      "We also investigate the challenges, progresses, and opportunities in correlating various symbol knowledge to their corresponding images. For instance, the MetaCLUE research initiative has proposed tasks that aim at evaluating progress on visual metaphor research, including Classification, Understanding (Retrieval, Captioning, VQA), Localization, and Generation<sup>25{\"paper_title\":\"MetaCLUE: Towards Comprehensive Visual Metaphors Research\", \"chunk_id\":\"5\", \"paper_id\":\"63a2794890e50fcafd293f21\"}</sup>. Additionally, the process of symbol grounding is crucial for multi-modal knowledge graph construction, where symbols are associated with appropriate images, and this is widely applied in various grounding tasks such as Entity Grounding, Concept Grounding, and Relation Grounding<sup>68{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"8\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>. Further, the development of image-based attention prompting for large vision-language models presents new research opportunities and potential impacts, such as reducing computational costs and addressing biases<sup>33{\"paper_title\":\"Attention Prompting on Image for Large Vision-Language Models\", \"chunk_id\":\"9\", \"paper_id\":\"66f4cd3401d2a3fbfcbfac37\"}</sup>. These advancements contribute to the broader goal of visual information extraction in the wild, where entities are modeled as semantic points to handle diverse and complex image structures<sup>{\"paper_title\":\"Modeling Entities As Semantic Points for Visual Information Extraction in the Wild\", \"chunk_id\":\"6\", \"paper_id\":\"641d14e090e50fcafdf73\n",
      "## 2.2 Application of Multi-Modal Knowledge Graphs\n",
      "\n",
      "In this subsection, we explore the application of multi-modal knowledge graphs (MKGs) in downstream tasks such as multimodal link prediction, named entity recognition, and relation extraction. Multi-modal knowledge graphs, as defined in a systematic review by Zhu et al. [6209c8295aee126c0f1e86c0], are semantic networks that include entities, concepts, and relationships, enhanced with visual information from images. Firstly, we discuss the use of MKGs in multimodal link prediction, where associated images enhance entity representation for missing triple prediction<sup>89{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup>. Secondly, we investigate the application of MKGs in multimodal named entity recognition (MNER) and multimodal relation extraction (MRE), where corresponding images complement textual contexts for entity and relation extraction<sup>89{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup> <sup>59{\"chunk_id\":\"1\", \"paper_id\":\"659cf440939a5f4082bb9afb\"}</sup> <sup>60{\"chunk_id\":\"1\", \"paper_id\":\"646aecaad68f896efa05a77c\"}</sup>. Additionally, we present a survey on MKGs constructed by texts and images, providing definitions, preliminaries, and a comprehensive review on the construction and application of MKGs<sup>34{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup><sup>51{\"chunk_id\":\"16\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>. Furthermore, we analyze the architecture universality and modality contradiction limitations of existing multimodal KGC models<sup>89{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup>. Finally, we propose MKGformer, a hybrid transformer framework for unified multimodal KGC, implementing multi-level fusion with coarse-grained prefix-guided interaction module and fine-grained correlation-aware fusion module in blocks of transformers<sup>16{\"chunk_id\":\"6\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup>.\n",
      "\n",
      "## 3 Enhancing Textual Information in Multilingual Knowledge Graphs\n",
      "## 3.1 The M-NTA Approach\n",
      "\n",
      "The M-NTA approach, which stands for Multi-source Naturalization, Translation, and Alignment, is a novel method designed to enhance the textual information in multilingual knowledge graphs<sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. This approach addresses the disparity in coverage and precision of textual information between English and non-English languages<sup>90{\"chunk_id\":\"2\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>45{\"chunk_id\":\"2\", \"paper_id\":\"654af002939a5f40823c8316\"}</sup> <sup>26{\"chunk_id\":\"5\", \"paper_id\":\"656e8e1a939a5f408286c92f\"}</sup> <sup>43{\"chunk_id\":\"4\", \"paper_id\":\"646d8642d68f896efa0a2fc8\"}</sup>. M-NTA combines machine translation, web search, and large language models to generate high-quality textual information for non-English languages<sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. It takes a three-step approach: naturalization, translation, and alignment. In the naturalization step, M-NTA retrieves the textual description of an entity in the source language from Wikidata and uses it to produce a natural language representation of the entity<sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. This allows M-NTA to rely on different representations for polysemous words<sup>77{\"chunk_id\":\"0\", \"paper_id\":\"655ebe83939a5f4082bb6cd0\"}</sup>. In the translation step, M-NTA translates the representation from the source language to the target language using a system<sup>52{\"chunk_id\":\"0\", \"paper_id\":\"63d7352390e50fcafda3044b\"}</sup>. Finally, in the alignment step, M-NTA aligns the translated output with the input to extract the entity name in the target language. M-NTA is transparent to the definition of a source system, allowing it to leverage any system that is able to produce the entity name in the target language. This approach has been shown to improve the coverage and precision of textual information in multilingual knowledge graphs, benefiting downstream applications such as entity linking, knowledge graph completion, and question answering<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>.\n",
      "## 3.2 Impact on Downstream Tasks\n",
      "\n",
      "In this subsection, we demonstrate the beneficial impact of KGE on downstream tasks and its effectiveness in improving the performance of state-of-the-art techniques in multilingual Entity Linking and Knowledge Graph Completion; we also show that KGE is beneficial for multilingual Question Answering in Appendix E.\n",
      "\n",
      "Multilingual Entity Linking (MEL). A direct application of increasing the quantity and quality of textual information in a knowledge graph is MEL, the task of linking a textual mention to an entity in a multilingual knowledge base (Botha et al., 2020)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. We evaluate the impact of our work on mGENRE (De Cao et al., 2022)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>, a state-of-the-art MEL system that fine-tunes mBART (Lewis et al., 2020) to autoregressively generate a Wikidata entity name for a mention in context. As noted by De Cao et al. (2022)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>, mGENRE generates entity names by also copying relevant portions of the input mention; however, copying is not possible when the mention of the entity is in a language for which Wikidata does not feature any names. By increasing the coverage and precision of textual information in Wikidata, M-NTA provides mGENRE with a broader coverage of entity names in non-English languages, aiding mGENRE's capability to rely on copying mechanisms. Indeed, as we can see in Table 3, augmenting mGENRE with M-NTA brings an improvement of 1.2 points in F1 score on average in Wikinews-7, setting a new state-of-the-art on this benchmark.\n",
      "\n",
      "Multilingual Knowledge Graph Completion (MKGC). Another direct application of KGE is MKGC, the task of predicting missing links between two entities in a multilingual knowledge base (Chen et al., 2020a). Similarly to MEL, we evaluate the downstream impact of our work on a re-implementation of Align-KGC (SoftAsym)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>, a state-of-the-art MKGC system originally proposed by Chakrabarti et al. (2022), which we rebuilt to use our entity names and descriptions to create mBERT-based entity embeddings. As shown in Table 4, using M-NTA to provide more and better entity names and descriptions allows the MKGC system to obtain a consistent improvement across non-English languages on DBP-5L (Chen et al., 2020a), i.e., +1.5 points in terms of Mean Reciprocal Rank (MRR), excluding English. We hypothesize that the larger part of this improvement comes from the fact that the entity descriptions generated by M-NTA are more informative, as suggested by the examples shown in Appendix C.7 (see Table 7)<sup>65{\"chunk_id\":\"11\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>.\n",
      "\n",
      "In summary, our experiments demonstrate that KGE can effectively enhance the performance of state-of-the-art systems in downstream applications such as multilingual entity linking, knowledge graph completion, and question answering. These results underscore the importance of increasing the coverage and precision of textual information in multilingual knowledge graphs for enabling better and more inclusive multilingual applications.\n",
      "\n",
      "## 4 Applications of Knowledge Graphs\n",
      "## 4.1 Recommendation Systems\n",
      "\n",
      "In this subsection, we delve into the application of knowledge graphs in building robust recommendation systems. Knowledge graphs provide a rich source of structured information about entities, which can be leveraged to enhance the performance of recommendation algorithms<sup>69{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}</sup><sup>74{\"chunk_id\":\"1\", \"paper_id\":\"6602894513fb2c6cf6d810ab\"}</sup><sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>23{\"chunk_id\":\"1\", \"paper_id\":\"660253bd13fb2c6cf6088509\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. Firstly, we discuss the use of knowledge graphs for collaborative filtering, where the relationships between users and items are modeled as a graph. By analyzing the user-item interaction graph, we can identify communities of users with similar preferences and recommend items to users based on the preferences of their neighbors<sup>39{\"chunk_id\":\"1\", \"paper_id\":\"644744fb71ac66d2cbf9bc82\"}</sup><sup>27{\"chunk_id\":\"2\", \"paper_id\":\"6504d4033fda6d7f06ca7ddf\"}</sup><sup>56{\"chunk_id\":\"1\", \"paper_id\":\"664568f601d2a3fbfcd97fde\"}</sup><sup>76{\"chunk_id\":\"2\", \"paper_id\":\"62708f625aee126c0fa69413\"}</sup>. Secondly, we explore the application of knowledge graphs in content-based filtering, where the content of items is represented as a graph. By analyzing the content graph, we can identify similar items and recommend them to users based on their past preferences<sup>27{\"chunk_id\":\"2\", \"paper_id\":\"6504d4033fda6d7f06ca7ddf\"}</sup><sup>39{\"chunk_id\":\"1\", \"paper_id\":\"644744fb71ac66d2cbf9bc82\"}</sup><sup>85{\"chunk_id\":\"3\", \"paper_id\":\"620b19c25aee126c0f7e64b0\"}</sup>. Additionally, we present a survey on the use of knowledge graphs in recommendation systems, providing an overview of the different methods and their effectiveness. Techniques such as embedding-based methods, path-based methods, and GNN-based methods have been employed to enrich semantics, though they may not be specifically tailored for reciprocal recommendations. Explainable recommendation is also a key area with diverse styles, yet systems often generate generic explanations without considering the unique needs of each party in a reciprocal recommendation scenario. To address these challenges, various models like SpherE<sup>41{\"chunk_id\":\"3\", \"paper_id\":\"6631a2d501d2a3fbfc8c4a96\"}</sup>, KAERR<sup>40{\"chunk_id\":\"1\", \"paper_id\":\"6602676513fb2c6cf6154a6a\"}</sup>, KRDN<sup>31{\"chunk_id\":\"1\", \"paper_id\":\"6456385bd68f896efacf2377\"}</sup>, and ApeGNN<sup>92{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223890\"}</sup> have been proposed, demonstrating enhanced performance and addressing issues such as data sparsity, cold-start problems, and interaction noise. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into pre-trained language models for recommendation<sup>2{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup><sup>3{\"chunk_id\":\"1\", \"paper_id\":\"6602492813fb2c6cf676d713\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>22{\"chunk_id\":\"1\", \"paper_id\":\"64a63bbad68f896efaec478f\"}</sup><sup>93{\"chunk_id\":\"0\", \"paper_id\":\"6514e2043fda6d7f062dc9a9\"}</sup>. Finally, we analyze the impact of knowledge graphs on the robustness of recommendation systems, showing that the incorporation of knowledge graphs can improve the resistance of recommendation algorithms to adversarial attacks and provide more diverse and personalized recommendations<sup>{\"chunk_id\":\"4\", \"paper\n",
      "## 4.2 Passage Re-ranking\n",
      "\n",
      "In this subsection, we explore the integration of explicit knowledge from knowledge graphs into pre-trained language models for passage re-ranking. Firstly, we discuss the use of knowledge graphs to enhance the representation of passages by incorporating additional semantic information. By representing passages as nodes in a knowledge graph and modeling their relationships with other nodes, we can capture the rich semantic context of passages<sup>57{\"chunk_id\":\"2\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}</sup> <sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>53{\"chunk_id\":\"0\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup> <sup>32{\"chunk_id\":\"2\", \"paper_id\":\"644744fa71ac66d2cbf9b272\"}</sup> <sup>50{\"chunk_id\":\"4\", \"paper_id\":\"66026b7c13fb2c6cf64c85b9\"}</sup>. This enhanced representation can then be used to re-rank passages in response to a query, improving the relevance of search results.\n",
      "\n",
      "Secondly, we investigate the application of knowledge graphs in passage re-ranking by integrating explicit knowledge into pre-trained language models. Pre-trained language models, such as BERT, capture a wealth of semantic information from large-scale text corpora. These models own a powerful ability on contextual text representation, which is learned through a pre-training step using tasks like Next Sentence Prediction and Masked Language Modeling, and transferred to the downstream NLP tasks. The Transformer network's attention mechanism allows it to learn the contextual relationships between words, resulting in high-quality language feature representations. Multilingual-BERT and XLM-RoBERTa are examples of such models, pre-trained on a vast amount of data in multiple languages, demonstrating the scalability and effectiveness of this approach.\n",
      "\n",
      "By fine-tuning these models on knowledge graph data, we can inject explicit knowledge into their representations, enabling them to better understand the relationships between entities and concepts<sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup> <sup>78{\"chunk_id\":\"0\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}</sup> <sup>71{\"chunk_id\":\"2\", \"paper_id\":\"6407fd3e90e50fcafd27470f\"}</sup> <sup>61{\"chunk_id\":\"4\", \"paper_id\":\"64a78f1fd68f896efa01e96a\"}</sup>. This integration of explicit knowledge can lead to more accurate passage re-ranking, as the models can leverage both the implicit knowledge captured during pre-training and the explicit knowledge provided by the knowledge graph<sup>64{\"chunk_id\":\"1\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup>.\n",
      "\n",
      "Additionally, we present a survey on the use of knowledge graphs in passage re-ranking, providing an overview of the different methods and their effectiveness. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into pre-trained language models for passage re-ranking. Explicit knowledge from knowledge graphs, such as synonyms and domain-specific knowledge, can help bridge the semantic gap between queries and passages, addressing the limitations of implicit knowledge extracted from noisy and heterogeneous data. Despite these challenges, recent studies have shown promising results in enhancing passage re-ranking with knowledge graphs, achieving state-of-the-art performance in both general and domain-specific data<sup>64{\"chunk_id\":\"1\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup><sup>2{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup>.\n",
      "\n",
      "Finally, we analyze the impact of knowledge graphs on the robustness of passage re-ranking, showing that the incorporation of knowledge graphs can improve the resistance of passage re-ranking algorithms to adversarial attacks and provide more relevant search results<sup>64{\"chunk_id\":\"1\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup>.\n",
      "## 4.3 Knowledge-Grounded Dialogue Systems\n",
      "\n",
      "In this subsection, we investigate the application of knowledge graphs in knowledge-grounded dialogue systems. Knowledge graphs provide a rich source of structured information about entities and concepts, which can be leveraged to enhance the performance of dialogue systems<sup>21{\"chunk_id\":\"0\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>69{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}</sup>. Firstly, we discuss the use of knowledge graphs to ground the dialogue context by linking entities and concepts mentioned in the dialogue to their corresponding nodes in the knowledge graph. This grounding enables the dialogue system to access additional semantic information about the entities and concepts, improving the understanding of the dialogue context.\n",
      "\n",
      "Secondly, we explore the application of knowledge graphs in generating informative and coherent responses in knowledge-grounded dialogue. By incorporating explicit knowledge from the knowledge graph into the dialogue generation process, the system can generate responses that are not only relevant to the dialogue context but also provide additional information and explanations<sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>53{\"chunk_id\":\"0\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>30{\"chunk_id\":\"1\", \"paper_id\":\"62393e7e5aee126c0f125ea2\"}</sup><sup>29{\"chunk_id\":\"1\", \"paper_id\":\"65275736939a5f4082a46c09\"}</sup><sup>82{\"chunk_id\":\"1\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup>. This incorporation of explicit knowledge can lead to more engaging and informative dialogue interactions<sup>48{\"chunk_id\":\"5\", \"paper_id\":\"623a90055aee126c0f36c49f\"}</sup><sup>20{\"chunk_id\":\"5\", \"paper_id\":\"646d863cd68f896efa09f112\"}</sup><sup>70{\"chunk_id\":\"0\", \"paper_id\":\"623a90055aee126c0f36c49f\"}</sup>.\n",
      "\n",
      "Additionally, we present a survey on the use of knowledge graphs in knowledge-grounded dialogue systems, providing an overview of the different methods and their effectiveness. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into dialogue systems. Finally, we analyze the impact of knowledge graphs on the robustness and informativeness of dialogue systems, showing that the incorporation of knowledge graphs can improve the system's ability to handle complex and diverse dialogue scenarios<sup>21{\"chunk_id\":\"0\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>19{\"chunk_id\":\"4\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>46{\"chunk_id\":\"6\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup>.\n",
      "\n",
      "## 5 Efficient Evaluation Methods for Knowledge Graph Completion\n",
      "## 5.1 Persistent Homology\n",
      "\n",
      "Persistent homology is a method from the field of algebraic topology that has been proposed as an efficient alternative for evaluating knowledge graph completion methods<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. It is based on the concept of topological data analysis and can capture the geometry of the manifold on which the representations of entities and relations in a knowledge graph reside<sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup>. This property allows persistent homology to evaluate the quality of knowledge graph completion methods by looking only at a fraction of the data, reducing the quadratic complexity of considering all the data points (KG triples in this case) for ranking. Furthermore, persistent homology is robust to noise, mitigating issues due to the open-world problem in knowledge graphs. Experimental results on standard datasets have shown that the proposed metric is highly correlated with ranking metrics (Hits@N, MR, MRR) and can reduce the evaluation time of a KG completion method from 18 hours (using Hits@10) to 27 seconds (using persistent homology)<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. This makes persistent homology a promising approach for faster evaluation of knowledge graph completion methods, enabling more efficient prototyping and development of KGE systems.\n",
      "\n",
      "### 5.1.1 Theoretical Foundations\n",
      "\n",
      "The theoretical foundations of persistent homology lie in algebraic topology, which studies the properties of shapes and spaces by using algebraic constructs. Persistent homology analyzes the topological structure of data by tracking the appearance and disappearance of holes of various dimensions in the data as it is transformed<sup>9{\"chunk_id\":\"3\", \"paper_id\":\"6566a0f1939a5f408265ae0d\"}</sup> <sup>28{\"chunk_id\":\"2\", \"paper_id\":\"62b288a35aee126c0fbd7b92\"}</sup> <sup>72{\"chunk_id\":\"3\", \"paper_id\":\"5ff44b5b91e01130648dc4f0\"}</sup>. For knowledge graph completion, this means tracking the connectivity patterns between entities and relations as the knowledge graph is completed<sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>15{\"chunk_id\":\"2\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}</sup> <sup>91{\"chunk_id\":\"5\", \"paper_id\":\"6209c8265aee126c0f1e81ff\"}</sup> <sup>18{\"chunk_id\":\"2\", \"paper_id\":\"6279c9c65aee126c0fdae32d\"}</sup> <sup>73{\"chunk_id\":\"1\", \"paper_id\":\"622577a75aee126c0f008e2a\"}</sup>. The key idea is to represent the knowledge graph as a simplicial complex, where nodes represent entities, edges represent relations between entities, and higher-dimensional simplices represent more complex interactions. By analyzing the topological structure of this simplicial complex, persistent homology can provide insights into the quality of the knowledge graph completion.\n",
      "\n",
      "### 5.1.2 Implementation and Challenges\n",
      "\n",
      "Implementing persistent homology for knowledge graph completion evaluation requires several steps. First, graphs are constructed from positive and negative triples, with scores calculated by a Knowledge Graph Embedding (KGE) method as edge weights. Next, a filtration process is applied to these graphs, converting them into a lower-dimensional representation using persistent homology. Finally, the distance between the graphs is calculated using the Sliced Wasserstein distance to provide a final metric score. This method, termed Knowledge Persistence ($\\mathcal{KP}$), offers a more efficient evaluation alternative to traditional ranking metrics, reducing computational complexity from $O(|\\mathcal{E}|^2)$ to $O(|\\mathcal{E}|)$ and showing high correlation with ranking metrics (Hits@N, MR, MRR)<sup>12{\"chunk_id\":\"4\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. One challenge is the computational complexity of computing persistent homology, which can be high for large knowledge graphs<sup>80{\"chunk_id\":\"9\", \"paper_id\":\"616e37445244ab9dcbd1ab3d\"}</sup>. Another challenge is the interpretation of the topological features produced by persistent homology, which requires domain-specific knowledge. Despite these challenges, the potential benefits of faster and more efficient evaluation of knowledge graph completion\n",
      "## 5.2 Pre-trained Language Models\n",
      "\n",
      "In recent years, pre-trained language models have gained significant attention in the field of natural language processing<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. These models, such as BERT, GPT-3, and T5, are trained on large amounts of unlabeled text data and have demonstrated remarkable performance on various downstream tasks<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. The use of pre-trained language models in knowledge graph completion (KGC) has also shown promising results, with models like SimKGC outperforming embedding-based methods on several benchmark datasets. Additionally, approaches such as KGTransformer and MGTCA have contributed to advancements in KGC by incorporating graph structure transfer and mixed geometry message functions, respectively<sup>35{\"chunk_id\":\"11\", \"paper_id\":\"6694829701d2a3fbfc86645d\"}</sup>.\n",
      "\n",
      "One of the key advantages of using pre-trained language models for KGC is their ability to model long-range dependencies and capture complex interactions between entities and relations<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. This is particularly useful for knowledge graphs with intricate and multi-hop relationships<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. Moreover, pre-trained language models can be fine-tuned on domain-specific data, allowing them to adapt to the specific characteristics of the knowledge graph and improve performance on downstream tasks<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>.\n",
      "\n",
      "However, the use of pre-trained language models in KGC also comes with challenges. One major challenge is the high computational cost associated with these models, especially for large-scale knowledge graphs. For instance, the complexity of computing cause-effect in a graph via Structural Causal Models (SCM) is exponential to the number of node/edge latent variables, while training a Neural Causal Model (NCM) is in polynomial time, addressing some of the scalability issues<sup>35{\"chunk_id\":\"11\", \"paper_id\":\"6694829701d2a3fbfc86645d\"}</sup>. However, applying such models to large graphs still presents practical scalability issues, which could be mitigated by using multi-threading/processing. Moreover, federated learning approaches face similar challenges, where data and model heterogeneity can lead to increased computational costs and performance degradation<sup>55{\"chunk_id\":\"1\", \"paper_id\":\"6516338d3fda6d7f065e5023\"}</sup>. The rapid generation of high-quality neural representations in neural rendering also comes with risks and costs, such as privacy and security concerns<sup>8{\"chunk_id\":\"5\", \"paper_id\":\"657134c4939a5f4082e4f323\"}</sup>. It is important to report efficiency metrics comprehensively to avoid partial conclusions about model efficiency<sup>83{\"chunk_id\":\"0\", \"paper_id\":\"617771c35244ab9dcbe79d60\"}</sup>, and to consider the computational costs associated with extending transformer-based models for tasks like context retrieval for Named Entity Recognition<sup>66{\"chunk_id\":\"4\", \"paper_id\":\"652dee7a939a5f4082b44782\"}</sup>.\n",
      "\n",
      "Additionally, the interpretability of these models can be limited, making it difficult to understand the reasoning behind their predictions<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. This issue is compounded by the complexity of real-world datasets and the challenges in evaluating explainable ML methods in application-grounded contexts. For instance, models may confuse visually similar numbers, struggle with high density object processing, or face difficulties in explaining sudden changes in geometric structures. Efforts to enhance interpretability are ongoing, with proposals for learning predictors by composing interpretable queries and the need for more rigorous evaluation approaches that consider real tasks, data, users, and inference strategies<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>.\n",
      "\n",
      "These models can capture latent relationships between entities and relations in the knowledge graph, enabling more accurate prediction of missing links<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>.\n",
      "\n",
      "## 6 Graph Neural Networks\n",
      "## 5.1 Persistent Homology\n",
      "\n",
      "Persistent homology, a method from algebraic topology, has emerged as a promising approach for efficient evaluation of knowledge graph completion methods. This is demonstrated in the paper by Bastos et al. where they introduce Knowledge Persistence ($\\mathcal{K P}$) as a novel method for faster evaluation of Knowledge Graph (KG) completion approaches, which utilizes persistent homology to represent the topology of KG completion methods. The effectiveness of persistent homology in various data analysis tasks, such as shape-analysis and image segmentation, is also highlighted in the works by Turkeš et al. and in the application of noise-aware topological consistency for histopathology images. Additionally, the survey by Dashti et al. and the work on cycle registration in persistent homology further underscore its utility in topological data analysis and supervised learning tasks. This unique capability allows persistent homology to evaluate the quality of knowledge graph completion by examining only a fraction of the data, thus reducing the quadratic complexity associated with ranking all KG triples<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. Moreover, its robustness to noise makes it a suitable metric for addressing the open-world problem in knowledge graphs. Experimental results have demonstrated a high correlation between persistent homology and traditional ranking metrics like Hits@N, MR, and MRR. For instance, the evaluation time for a KG completion method was reduced from 18 hours with Hits@10 to just 27 seconds with persistent homology, showcasing its potential for faster and more efficient evaluation<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. Despite challenges such as computational complexity and the need for domain-specific knowledge to interpret topological features, persistent homology remains a promising area for future research in knowledge graph completion evaluation.\n",
      "\n",
      "### 5.1.1 Theoretical Foundations\n",
      "\n",
      "The theoretical underpinnings of persistent homology lie in algebraic topology, which examines the properties of shapes and spaces using algebraic constructs. In the context of knowledge graph completion, persistent homology analyzes the topological structure of the data by tracking the emergence and vanishing of holes of various dimensions as the knowledge graph is completed. By studying the topological structure of this simplicial complex, persistent homology offers insights into the quality of the knowledge graph completion process.\n",
      "\n",
      "### 5.1.2 Implementation and Challenges\n",
      "\n",
      "Implementing persistent homology for knowledge graph completion evaluation requires several steps: representing the knowledge graph as a simplicial complex, computing topological features using persistent homology, and using these features to evaluate the quality of the knowledge graph completion. The computational complexity of computing persistent homology can be significant for large knowledge graphs, and the interpretation of the topological features produced requires domain-specific knowledge. Persistent homology has emerged as a powerful tool for analyzing the topology of various kinds of real-world data, including images, and it performs well in tasks such as detecting the number of holes, curvature, and convexity from 2D and 3D point clouds. However, its effectiveness also depends on computational resources, training data, and the domain-specific interpretation of the results. For instance, in the context of graph data, methods like Curvature Graph Generative Adversarial Networks (CurvGAN) attempt to learn robust representations that capture the underlying topological properties, highlighting the need for specialized approaches to handle the complexity and heterogeneity of the data<sup>9{\"chunk_id\":\"3\", \"paper_id\":\"6566a0f1939a5f408265ae0d\"}</sup><sup>95{\"chunk_id\":\"0\", \"paper_id\":\"62b288a35aee126c0fbd7b92\"}</sup><sup>36{\"chunk_id\":\"1\", \"paper_id\":\"6221834e5aee126c0f23c2cc\"}</sup>.\n",
      "\n",
      "### 5.1.3 Comparison with Ranking Metrics\n",
      "\n",
      "While persistent homology shows promise as an alternative evaluation metric for knowledge graph completion, it is important to compare its performance with traditional ranking metrics. Experimental results have shown that persistent homology is highly correlated with ranking metrics such as Hits@N, MR, and MRR, suggesting it can provide a reliable measure of the quality of knowledge graph completion methods. Therefore, it may be beneficial to combine persistent homology with other evaluation metrics to obtain a more comprehensive assessment of knowledge graph completion methods.\n",
      "\n",
      "In conclusion, persistent homology is a promising approach for faster and more efficient evaluation of knowledge graph completion methods. Its ability to capture the topological structure of knowledge graphs and its robustness to noise make it a valuable tool for researchers and practitioners in the field of knowledge graph completion.\n",
      "## 6.2 Graph vs. Sequence Representations for Knowledge-Grounded Dialogue\n",
      "\n",
      "In this subsection, we compare the performance of graph and sequence representations for knowledge-grounded dialogue. Knowledge graphs provide a rich source of structured information about entities and concepts, which can be leveraged to enhance the performance of dialogue systems<sup>21{\"chunk_id\":\"0\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>69{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}</sup>. Graph representations can capture the complex relationships and interactions between entities and concepts, while sequence representations can model the temporal dynamics and coherence of dialogue<sup>62{\"chunk_id\":\"3\", \"paper_id\":\"6228477c5aee126c0f0c2072\"}</sup><sup>4{\"chunk_id\":\"13\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>42{\"chunk_id\":\"1\", \"paper_id\":\"61a6e8995244ab9dcb50f4ed\"}</sup><sup>38{\"chunk_id\":\"4\", \"paper_id\":\"6449f232582c1376bb223932\"}</sup><sup>94{\"chunk_id\":\"5\", \"paper_id\":\"65ee659613fb2c6cf61acd26\"}</sup>.\n",
      "\n",
      "We investigate the use of graph neural networks (GNNs) for temporal reasoning in knowledge graphs. GNNs are a type of neural network designed to process graph-structured data. By aggregating information from neighboring nodes, GNNs can capture the local and global structure of graphs<sup>44{\"chunk_id\":\"1\", \"paper_id\":\"62df81d45aee126c0f872671\"}</sup><sup>47{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}</sup><sup>63{\"chunk_id\":\"2\", \"paper_id\":\"63f2e4aa90e50fcafd282094\"}</sup><sup>67{\"chunk_id\":\"2\", \"paper_id\":\"65fb94f713fb2c6cf682ffd9\"}</sup><sup>58{\"chunk_id\":\"2\", \"paper_id\":\"61e8d30e5244ab9dcb5832ce\"}</sup>. In the context of knowledge graphs, GNNs can model the temporal evolution of entities and relationships, enabling more accurate prediction of future events and states<sup>49{\"chunk_id\":\"1\", \"paper_id\":\"63dcdb422c26941cf00b64a4\"}</sup><sup>84{\"chunk_id\":\"1\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}</sup><sup>5{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}</sup><sup>88{\"chunk_id\":\"1\", \"paper_id\":\"64af9a093fda6d7f065a6ec4\"}</sup>.\n",
      "\n",
      "We also explore the application of graph and sequence representations in knowledge-grounded dialogue. Graph representations can incorporate explicit knowledge from the knowledge graph into the dialogue generation process, allowing the system to generate responses that are not only relevant to the dialogue context but also provide additional information and explanations. On the other hand, sequence representations can model the temporal dynamics and coherence of dialogue, ensuring that the generated responses are contextually appropriate and informative<sup>7{\"chunk_id\":\"1\", \"paper_id\":\"634e194790e50fcafd24f231\"}</sup><sup>13{\"chunk_id\":\"2\", \"paper_id\":\"6397ed4e90e50fcafdf440b9\"}</sup><sup>37{\"chunk_id\":\"5\", \"paper_id\":\"64e433243fda6d7f06010570\"}</sup><sup>1{\"chunk_id\":\"1\", \"paper_id\":\"640166a490e50fcafd68b2f5\"}</sup>.\n",
      "\n",
      "We present a survey on the use of graph and sequence representations in knowledge-grounded dialogue, providing an overview of the different methods and their effectiveness. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into dialogue systems. Finally, we analyze the impact of\n",
      "## 7 Conclusion \n",
      "This survey delves into the technology development roadmap for knowledge graph enhancement, focusing on the impact of enhancing textual information in knowledge graphs on downstream tasks such as Question Answering, Text Summarization, Entity Linking, and Word Sense Disambiguation<sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. It showcases the effectiveness of knowledge graph enhancement (KGE) in improving the performance of state-of-the-art techniques in multilingual entity linking and knowledge graph completion<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. The survey also highlights the benefits of KGE for multilingual question answering<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> and its impact on multilingual entity linking and knowledge graph completion<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. Additionally, the study on Prix-LM demonstrates its effectiveness in capturing, propagating, and enriching knowledge in multilingual knowledge bases<sup>17{\"chunk_id\":\"5\", \"paper_id\":\"616e37435244ab9dcbd1a751\"}</sup>.\n",
      "\n",
      "Despite these advancements, the survey underscores the existing limitations and gaps in textual information coverage and precision between English and non-English languages in knowledge graphs. Supplemented with citations, this issue is addressed through the introduction of the task of Knowledge Graph Enhancement (KGE) and the development of a benchmark called WikiKGE-10 which evaluates KGE systems for entity names in 10 typologically diverse languages. The survey highlights the challenges in coverage and precision, with coverage being limited due to the availability of textual information in non-English languages, and precision being compromised by human errors, stale entries, and under-specific information. To bridge this gap, the paper proposes M-NTA, an unsupervised approach combining Machine Translation, Web Search, and Large Language Models, demonstrating its impact on downstream tasks like Entity Linking, Knowledge Graph Completion, and Question Answering<sup>90{\"chunk_id\":\"2\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>75{\"chunk_id\":\"0\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>.\n",
      "\n",
      "It emphasizes the need for more extensive investigations to bridge this gap and achieve parity across languages. Furthermore, the survey discusses the limitations of current KGE approaches, such as the focus on specific types of textual information and the primary attention on Wikidata. It also highlights the need for future work to investigate the extent of the problem on other knowledge graphs and to create benchmarks for different types of textual information.\n",
      "\n",
      "In conclusion, the survey provides insights into the current state and future directions of KGE research, aiming to enhance the coverage and precision of textual information in multilingual knowledge graphs.\n",
      "\n",
      "# References\n",
      "\n",
      "[1] CTRLStruct: Dialogue Structure Learning for Open-Domain Response Generation WWW_2023_ chunk_1 id:640166a490e50fcafd68b2f5\n",
      "\n",
      "[2] Incorporating Explicit Knowledge in Pre-trained Language Models for Passage Re-ranking SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_0 id:626754c85aee126c0fbcdd50\n",
      "\n",
      "[3] LLMRG: Improving Recommendations Through Large Language Model Reasoning Graphs AAAI2024 chunk_1 id:6602492813fb2c6cf676d713\n",
      "\n",
      "[4] Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning Artificial_Intelligence chunk_13 id:64fa84403fda6d7f06700712\n",
      "\n",
      "[5] Adaptive Path-Memory Network for Temporal Knowledge Graph Reasoning IJCAI2023 chunk_1 id:6448967571ac66d2cbd87664\n",
      "\n",
      "[6] Can Persistent Homology Provide an Efficient Alternative for Evaluation of Knowledge Graph Completion Methods? WWW_2023_ chunk_0 id:63d9d87390e50fcafd57e32c\n",
      "\n",
      "[7] Supervised Prototypical Contrastive Learning for Emotion Recognition in Conversation EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing chunk_1 id:634e194790e50fcafd24f231\n",
      "\n",
      "[8] HybridNeRF: Efficient Neural Rendering Via Adaptive Volumetric Surfaces CVPR2024 chunk_5 id:657134c4939a5f4082e4f323\n",
      "\n",
      "[9] Semi-supervised Segmentation of Histopathology Images with Noise-Aware Topological Consistency ECCV2024 chunk_3 id:6566a0f1939a5f408265ae0d\n",
      "\n",
      "[10] NativE: Multi-modal Knowledge Graph Completion in the Wild SIGIR2024 chunk_5 id:6614c27c13fb2c6cf65095af\n",
      "\n",
      "[11] Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_1 id:627483fa5aee126c0f07e07a\n",
      "\n",
      "[12] Can Persistent Homology Provide an Efficient Alternative for Evaluation of Knowledge Graph Completion Methods? WWW_2023_ chunk_4 id:63d9d87390e50fcafd57e32c\n",
      "\n",
      "[13] CDialog: A Multi-turn Covid-19 Conversation Dataset for Entity-Aware Dialog Generation EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing chunk_2 id:6397ed4e90e50fcafdf440b9\n",
      "\n",
      "[14] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_5 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[15] Inductive Logical Query Answering in Knowledge Graphs. NeurIPS_2022_Neural_Information_Processing_Systems chunk_2 id:634e193f90e50fcafd24e2d7\n",
      "\n",
      "[16] Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_6 id:627483fa5aee126c0f07e07a\n",
      "\n",
      "[17] Prix-LM: Pretraining for Multilingual Knowledge Base Construction ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_5 id:616e37435244ab9dcbd1a751\n",
      "\n",
      "[18] Re-thinking Knowledge Graph Completion Evaluation from an Information Retrieval Perspective. SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_2 id:6279c9c65aee126c0fdae32d\n",
      "\n",
      "[19] Graph Vs. Sequence: an Empirical Study on Knowledge Forms for Knowledge-Grounded Dialogue EMNLP_2023 chunk_4 id:6576d783939a5f408200f903\n",
      "\n",
      "[20] ReSee: Responding Through Seeing Fine-grained Visual Knowledge in Open-domain Dialogue EMNLP_2023 chunk_5 id:646d863cd68f896efa09f112\n",
      "\n",
      "[21] Graph Vs. Sequence: an Empirical Study on Knowledge Forms for Knowledge-Grounded Dialogue EMNLP_2023 chunk_0 id:6576d783939a5f408200f903\n",
      "\n",
      "[22] All in One: Multi-Task Prompting for Graph Neural Networks KDD2023 chunk_1 id:64a63bbad68f896efaec478f\n",
      "\n",
      "[23] Bayesian Inference with Complex Knowledge Graph Evidence AAAI2024 chunk_1 id:660253bd13fb2c6cf6088509\n",
      "\n",
      "[24] Enhancing Dialogue Generation via Dynamic Graph Knowledge Aggregation ACL_2023 chunk_2 id:649d037cd68f896efa4567aa\n",
      "\n",
      "[25] MetaCLUE: Towards Comprehensive Visual Metaphors Research CVPR_2023 chunk_5 id:63a2794890e50fcafd293f21\n",
      "\n",
      "[26] Explaining with Contrastive Phrasal Highlighting: A Case Study in Assisting Humans to Detect Translation Differences EMNLP_2023 chunk_5 id:656e8e1a939a5f408286c92f\n",
      "\n",
      "[27] MM-FRec: Multi-Modal Enhanced Fashion Item Recommendation IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_2 id:6504d4033fda6d7f06ca7ddf\n",
      "\n",
      "[28] On the Effectiveness of Persistent Homology. NeurIPS_2022_Neural_Information_Processing_Systems chunk_2 id:62b288a35aee126c0fbd7b92\n",
      "\n",
      "[29] Well Begun is Half Done: Generator-agnostic Knowledge Pre-Selection for Knowledge-Grounded Dialogue EMNLP_2023 chunk_1 id:65275736939a5f4082a46c09\n",
      "\n",
      "[30] Towards Large-Scale Interpretable Knowledge Graph Reasoning for Dialogue Systems ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_1 id:62393e7e5aee126c0f125ea2\n",
      "\n",
      "[31] Knowledge-refined Denoising Network for Robust Recommendation. SIGIR2023 chunk_1 id:6456385bd68f896efacf2377\n",
      "\n",
      "[32] Detecting Spoilers in Movie Reviews with External Movie Knowledge and User Networks EMNLP_2023 chunk_2 id:644744fa71ac66d2cbf9b272\n",
      "\n",
      "[33] Attention Prompting on Image for Large Vision-Language Models ECCV2024 chunk_9 id:66f4cd3401d2a3fbfcbfac37\n",
      "\n",
      "[34] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_0 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[35] Graph Neural Network Causal Explanation Via Neural Causal Models ECCV2024 chunk_11 id:6694829701d2a3fbfc86645d\n",
      "\n",
      "[36] Curvature Graph Generative Adversarial Networks WWW_2022_The_Web_Conference chunk_1 id:6221834e5aee126c0f23c2cc\n",
      "\n",
      "[37] Can Language Models Learn to Listen? ICCV_2023 chunk_5 id:64e433243fda6d7f06010570\n",
      "\n",
      "[38] Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning WWW_2023_ chunk_4 id:6449f232582c1376bb223932\n",
      "\n",
      "[39] Attention-guided Multi-step Fusion: A Hierarchical Fusion Network for Multimodal Recommendation. SIGIR2023 chunk_1 id:644744fb71ac66d2cbf9bc82\n",
      "\n",
      "[40] Knowledge-Aware Explainable Reciprocal Recommendation AAAI2024 chunk_1 id:6602676513fb2c6cf6154a6a\n",
      "\n",
      "[41] SpherE: Expressive and Interpretable Knowledge Graph Embedding for Set Retrieval SIGIR2024 chunk_3 id:6631a2d501d2a3fbfc8c4a96\n",
      "\n",
      "[42] AirObject: A Temporally Evolving Graph Embedding for Object Identification CVPR_2022_IEEE_Conference_on_Computer_Vision_and_Pattern_Recognition chunk_1 id:61a6e8995244ab9dcb50f4ed\n",
      "\n",
      "[43] Beyond Shared Vocabulary: Increasing Representational Word Similarities Across Languages for Multilingual Machine Translation EMNLP_2023 chunk_4 id:646d8642d68f896efa0a2fc8\n",
      "\n",
      "[44] Generative Subgraph Contrast for Self-Supervised Graph Representation Learning ECCV_2022_European_Conference_on_Computer_Vision chunk_1 id:62df81d45aee126c0f872671\n",
      "\n",
      "[45] Language Representation Projection: Can We Transfer Factual Knowledge Across Languages in Multilingual Language Models? EMNLP_2023 chunk_2 id:654af002939a5f40823c8316\n",
      "\n",
      "[46] Enhancing Dialogue Generation via Dynamic Graph Knowledge Aggregation ACL_2023 chunk_6 id:649d037cd68f896efa4567aa\n",
      "\n",
      "[47] Geodesic Graph Neural Network for Efficient Graph Representation Learning. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1 id:633f98d290e50fcafd78de82\n",
      "\n",
      "[48] Achieving Conversational Goals with Unsupervised Post-hoc Knowledge Injection. ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_5 id:623a90055aee126c0f36c49f\n",
      "\n",
      "[49] A Differential Geometric View and Explainability of GNN on Evolving Graphs. ICLR_2023 chunk_1 id:63dcdb422c26941cf00b64a4\n",
      "\n",
      "[50] Augmented Commonsense Knowledge for Remote Object Grounding AAAI2024 chunk_4 id:66026b7c13fb2c6cf64c85b9\n",
      "\n",
      "[51] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_16 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[52] A Multi-task Multi-stage Transitional Training Framework for Neural Chat   Translation IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_0 id:63d7352390e50fcafda3044b\n",
      "\n",
      "[53] Enhancing Dialogue Generation via Dynamic Graph Knowledge Aggregation ACL_2023 chunk_0 id:649d037cd68f896efa4567aa\n",
      "\n",
      "[54] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_3 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[55] Generalizable Heterogeneous Federated Cross-Correlation and Instance Similarity Learning IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_1 id:6516338d3fda6d7f065e5023\n",
      "\n",
      "[56] Exploring the Individuality and Collectivity of Intents Behind Interactions for Graph Collaborative Filtering SIGIR2024 chunk_1 id:664568f601d2a3fbfcd97fde\n",
      "\n",
      "[57] Knowledge Graph Prompting for Multi-Document Question Answering AAAI2024 chunk_2 id:64e6d5a53fda6d7f0652a3cc\n",
      "\n",
      "[58] Adversarial Contrastive Learning for Evidence-aware Fake News Detection with Graph Neural Networks. WWW_2022_The_Web_Conference chunk_2 id:61e8d30e5244ab9dcb5832ce\n",
      "\n",
      "[59] UMIE: Unified Multimodal Information Extraction with Instruction Tuning AAAI2024 chunk_1 id:659cf440939a5f4082bb9afb\n",
      "\n",
      "[60] Information Screening Whilst Exploiting! Multimodal Relation Extraction with Feature Denoising and Multimodal Topic Modeling ACL_2023 chunk_1 id:646aecaad68f896efa05a77c\n",
      "\n",
      "[61] Knowledge Graph Self-Supervised Rationalization for Recommendation KDD2023 chunk_4 id:64a78f1fd68f896efa01e96a\n",
      "\n",
      "[62] Understanding the Long-Term Dynamics of Mobile App Usage Context via Graph Embedding IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_3 id:6228477c5aee126c0f0c2072\n",
      "\n",
      "[63] Search to Capture Long-range Dependency with Stacking GNNs for Graph Classification. WWW_2023_ chunk_2 id:63f2e4aa90e50fcafd282094\n",
      "\n",
      "[64] Incorporating Explicit Knowledge in Pre-trained Language Models for Passage Re-ranking SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_1 id:626754c85aee126c0fbcdd50\n",
      "\n",
      "[65] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_11 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[66] Learning to Rank Context for Named Entity Recognition Using a Synthetic Dataset EMNLP_2023 chunk_4 id:652dee7a939a5f4082b44782\n",
      "\n",
      "[67] Building Optimal Neural Architectures Using Interpretable Knowledge CVPR2024 chunk_2 id:65fb94f713fb2c6cf682ffd9\n",
      "\n",
      "[68] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_8 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[69] Wikidata As a Seed for Web Extraction WWW_2023_ chunk_1 id:6449f232582c1376bb223931\n",
      "\n",
      "[70] Achieving Conversational Goals with Unsupervised Post-hoc Knowledge Injection. ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_0 id:623a90055aee126c0f36c49f\n",
      "\n",
      "[71] Structure Pre-training and Prompt Tuning for Knowledge Graph Transfer WWW_2023_ chunk_2 id:6407fd3e90e50fcafd27470f\n",
      "\n",
      "[72] Cycle Registration in Persistent Homology with Applications in Topological Bootstrap IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_3 id:5ff44b5b91e01130648dc4f0\n",
      "\n",
      "[73] SimKGC: Simple Contrastive Knowledge Graph Completion with Pre-trained Language Models ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_1 id:622577a75aee126c0f008e2a\n",
      "\n",
      "[74] KGDM: A Diffusion Model to Capture Multiple Relation Semantics for Knowledge Graph Embedding AAAI2024 chunk_1 id:6602894513fb2c6cf6d810ab\n",
      "\n",
      "[75] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_0 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[76] Knowledge Graph Contrastive Learning for Recommendation SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_2 id:62708f625aee126c0fa69413\n",
      "\n",
      "[77] Systematic Word Meta-Sense Extension EMNLP_2023 chunk_0 id:655ebe83939a5f4082bb6cd0\n",
      "\n",
      "[78] Inductive Logical Query Answering in Knowledge Graphs. NeurIPS_2022_Neural_Information_Processing_Systems chunk_0 id:634e193f90e50fcafd24e2d7\n",
      "\n",
      "[79] Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning Artificial_Intelligence chunk_1 id:64fa84403fda6d7f06700712\n",
      "\n",
      "[80] Topologically Regularized Data Embeddings. ICLR_2022_International_Conference_on_Learning_Representation chunk_9 id:616e37445244ab9dcbd1ab3d\n",
      "\n",
      "[81] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_1 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[82] Graph Vs. Sequence: an Empirical Study on Knowledge Forms for Knowledge-Grounded Dialogue EMNLP_2023 chunk_1 id:6576d783939a5f408200f903\n",
      "\n",
      "[83] The Efficiency Misnomer ICLR_2022_International_Conference_on_Learning_Representation chunk_0 id:617771c35244ab9dcbe79d60\n",
      "\n",
      "[84] Provably Expressive Temporal Graph Networks. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1 id:633a52a190e50fcafd688ec1\n",
      "\n",
      "[85] Modeling User Behavior with Graph Convolution for Personalized Product Search WWW_2022_The_Web_Conference chunk_3 id:620b19c25aee126c0f7e64b0\n",
      "\n",
      "[86] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_15 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[87] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_16 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[88] WinGNN: Dynamic Graph Neural Networks with Random Gradient Aggregation Window KDD2023 chunk_1 id:64af9a093fda6d7f065a6ec4\n",
      "\n",
      "[89] Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_1 id:627483fa5aee126c0f07e07a\n",
      "\n",
      "[90] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_2 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[91] Rethinking Graph Convolutional Networks in Knowledge Graph Completion WWW_2022_The_Web_Conference chunk_5 id:6209c8265aee126c0f1e81ff\n",
      "\n",
      "[92] ApeGNN: Node-Wise Adaptive Aggregation in GNNs for Recommendation WWW_2023_ chunk_1 id:6449f232582c1376bb223890\n",
      "\n",
      "[93] Graph Neural Prompting with Large Language Models AAAI2024 chunk_0 id:6514e2043fda6d7f062dc9a9\n",
      "\n",
      "[94] Towards Scene Graph Anticipation ECCV2024 chunk_5 id:65ee659613fb2c6cf61acd26\n",
      "\n",
      "[95] On the Effectiveness of Persistent Homology. NeurIPS_2022_Neural_Information_Processing_Systems chunk_0 id:62b288a35aee126c0fbd7b92\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "survey_with_references = await citation_processor.process_sections(content,topic)\n",
    "print(survey_with_references)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "## 1 Introduction\n",
      "\n",
      "This research survey delves into the advancements and challenges in knowledge graph construction and applications<sup>34{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>. It encompasses a comprehensive overview of multi-modal knowledge graphs, which integrate textual and visual information to enhance machine understanding. The survey also investigates the enhancement of textual information in multilingual knowledge graphs, aiming to bridge the gap between English and non-English languages. Additionally, it explores the application of knowledge graphs in robust recommendation systems, passage re-ranking, and knowledge-grounded dialogue systems. Furthermore, the survey analyzes the incorporation of explicit knowledge from knowledge graphs into pre-trained language models. It also reviews the latest research on efficient evaluation methods for knowledge graph completion and the utilization of Wikidata as a seed for web extraction. Moreover, the survey discusses the application of graph neural networks for temporal knowledge graph reasoning and the comparison of graph and sequence representations for knowledge-grounded dialogue. Additionally, it covers the use of masked generation feature distillation for knowledge graph completion and the integration of explicit knowledge into pre-trained language models for passage re-ranking. Furthermore, the survey delves into the construction and application of multi-modal knowledge graphs, including the use of modality split and ensemble methods. It also explores the challenges and opportunities in continual knowledge graph embedding and the application of curriculum-enhanced attention distillation for training graph transformers. Lastly, the survey presents a large-scale financial dataset for graph anomaly detection, DGraph, and a motif-aware Riemannian graph neural network with generative-contrastive learning, MotifRGC. DGraph overcomes many limitations of current GAD datasets, containing about 3M nodes, 4M dynamic edges, and 1M ground-truth nodes, while MotifRGC addresses issues in Riemannian graph representation learning by capturing motif regularity in a diverse-curvature manifold without labels.\n",
      "\n",
      "## 2 Multi-Modal Knowledge Graphs\n",
      "## 2.1 Construction of Multi-Modal Knowledge Graphs\n",
      "\n",
      "In this subsection, we delve into the construction of multi-modal knowledge graphs (MKGs), which integrate textual and visual information to enhance machine understanding. Multi-Modal Knowledge Graphs (MKGs) integrate textual and visual information to enhance machine understanding, as evidenced by the survey on MMKGs by Xiangru Zhu et al. which defines and discusses the benefits of MMKGs in various multi-modal tasks, and the work of Xie et al. which proposes a hybrid transformer framework for unified multimodal KGC.<sup>34{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> We review the methods for constructing MKGs by integrating textual and visual information. Methods for constructing MKGs by integrating textual and visual information are reviewed.<sup>87{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"16\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> <sup>11{\"paper_title\":\"Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion\", \"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup> <sup>10{\"paper_title\":\"NativE: Multi-modal Knowledge Graph Completion in the Wild\", \"chunk_id\":\"5\", \"paper_id\":\"6614c27c13fb2c6cf65095af\"}</sup> <sup>86{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"15\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup> Firstly, we discuss the construction of MKGs from images to symbols, where images are labeled with symbols in the knowledge graph. This process, known as symbol grounding, refers to the process of finding proper multi-modal data items such as images to denote a symbol knowledge exists in a traditional KG. Compared to the image labeling way, the symbol grounding way is more widely applied for MMKG construction. Most of the existing MMKGs are constructed in this way, as detailed in the survey 'Multi-Modal Knowledge Graph Construction and Application: A Survey' [6209c8295aee126c0f1e86c0]. Secondly, we explore the construction of MKGs from symbols to images, where symbols in the knowledge graph are grounded to corresponding images. This process, known as symbol grounding, refers to the process of finding proper multi-modal data items such as images to denote a symbol knowledge exists in a traditional KG. Compared to the image labeling way, the symbol grounding way is more widely applied for MMKG construction. Most of the existing MMKGs are constructed in this way, as listed in Table 2b of the survey by Xiangru Zhu et al.<sup>68{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"8\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>.\n",
      "\n",
      "We also investigate the challenges, progresses, and opportunities in correlating various symbol knowledge to their corresponding images. For instance, the MetaCLUE research initiative has proposed tasks that aim at evaluating progress on visual metaphor research, including Classification, Understanding (Retrieval, Captioning, VQA), Localization, and Generation<sup>25{\"paper_title\":\"MetaCLUE: Towards Comprehensive Visual Metaphors Research\", \"chunk_id\":\"5\", \"paper_id\":\"63a2794890e50fcafd293f21\"}</sup>. Additionally, the process of symbol grounding is crucial for multi-modal knowledge graph construction, where symbols are associated with appropriate images, and this is widely applied in various grounding tasks such as Entity Grounding, Concept Grounding, and Relation Grounding<sup>68{\"paper_title\":\"Multi-Modal Knowledge Graph Construction and Application: A Survey\", \"chunk_id\":\"8\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>. Further, the development of image-based attention prompting for large vision-language models presents new research opportunities and potential impacts, such as reducing computational costs and addressing biases<sup>33{\"paper_title\":\"Attention Prompting on Image for Large Vision-Language Models\", \"chunk_id\":\"9\", \"paper_id\":\"66f4cd3401d2a3fbfcbfac37\"}</sup>. These advancements contribute to the broader goal of visual information extraction in the wild, where entities are modeled as semantic points to handle diverse and complex image structures<sup>{\"paper_title\":\"Modeling Entities As Semantic Points for Visual Information Extraction in the Wild\", \"chunk_id\":\"6\", \"paper_id\":\"641d14e090e50fcafdf73\n",
      "## 2.2 Application of Multi-Modal Knowledge Graphs\n",
      "\n",
      "In this subsection, we explore the application of multi-modal knowledge graphs (MKGs) in downstream tasks such as multimodal link prediction, named entity recognition, and relation extraction. Multi-modal knowledge graphs, as defined in a systematic review by Zhu et al. [6209c8295aee126c0f1e86c0], are semantic networks that include entities, concepts, and relationships, enhanced with visual information from images. Firstly, we discuss the use of MKGs in multimodal link prediction, where associated images enhance entity representation for missing triple prediction<sup>89{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup>. Secondly, we investigate the application of MKGs in multimodal named entity recognition (MNER) and multimodal relation extraction (MRE), where corresponding images complement textual contexts for entity and relation extraction<sup>89{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup> <sup>59{\"chunk_id\":\"1\", \"paper_id\":\"659cf440939a5f4082bb9afb\"}</sup> <sup>60{\"chunk_id\":\"1\", \"paper_id\":\"646aecaad68f896efa05a77c\"}</sup>. Additionally, we present a survey on MKGs constructed by texts and images, providing definitions, preliminaries, and a comprehensive review on the construction and application of MKGs<sup>34{\"chunk_id\":\"0\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup><sup>51{\"chunk_id\":\"16\", \"paper_id\":\"6209c8295aee126c0f1e86c0\"}</sup>. Furthermore, we analyze the architecture universality and modality contradiction limitations of existing multimodal KGC models<sup>89{\"chunk_id\":\"1\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup>. Finally, we propose MKGformer, a hybrid transformer framework for unified multimodal KGC, implementing multi-level fusion with coarse-grained prefix-guided interaction module and fine-grained correlation-aware fusion module in blocks of transformers<sup>16{\"chunk_id\":\"6\", \"paper_id\":\"627483fa5aee126c0f07e07a\"}</sup>.\n",
      "\n",
      "## 3 Enhancing Textual Information in Multilingual Knowledge Graphs\n",
      "## 3.1 The M-NTA Approach\n",
      "\n",
      "The M-NTA approach, which stands for Multi-source Naturalization, Translation, and Alignment, is a novel method designed to enhance the textual information in multilingual knowledge graphs<sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. This approach addresses the disparity in coverage and precision of textual information between English and non-English languages<sup>90{\"chunk_id\":\"2\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>45{\"chunk_id\":\"2\", \"paper_id\":\"654af002939a5f40823c8316\"}</sup> <sup>26{\"chunk_id\":\"5\", \"paper_id\":\"656e8e1a939a5f408286c92f\"}</sup> <sup>43{\"chunk_id\":\"4\", \"paper_id\":\"646d8642d68f896efa0a2fc8\"}</sup>. M-NTA combines machine translation, web search, and large language models to generate high-quality textual information for non-English languages<sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. It takes a three-step approach: naturalization, translation, and alignment. In the naturalization step, M-NTA retrieves the textual description of an entity in the source language from Wikidata and uses it to produce a natural language representation of the entity<sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. This allows M-NTA to rely on different representations for polysemous words<sup>77{\"chunk_id\":\"0\", \"paper_id\":\"655ebe83939a5f4082bb6cd0\"}</sup>. In the translation step, M-NTA translates the representation from the source language to the target language using a system<sup>52{\"chunk_id\":\"0\", \"paper_id\":\"63d7352390e50fcafda3044b\"}</sup>. Finally, in the alignment step, M-NTA aligns the translated output with the input to extract the entity name in the target language. M-NTA is transparent to the definition of a source system, allowing it to leverage any system that is able to produce the entity name in the target language. This approach has been shown to improve the coverage and precision of textual information in multilingual knowledge graphs, benefiting downstream applications such as entity linking, knowledge graph completion, and question answering<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>.\n",
      "## 3.2 Impact on Downstream Tasks\n",
      "\n",
      "In this subsection, we demonstrate the beneficial impact of KGE on downstream tasks and its effectiveness in improving the performance of state-of-the-art techniques in multilingual Entity Linking and Knowledge Graph Completion; we also show that KGE is beneficial for multilingual Question Answering in Appendix E.\n",
      "\n",
      "Multilingual Entity Linking (MEL). A direct application of increasing the quantity and quality of textual information in a knowledge graph is MEL, the task of linking a textual mention to an entity in a multilingual knowledge base (Botha et al., 2020)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. We evaluate the impact of our work on mGENRE (De Cao et al., 2022)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>, a state-of-the-art MEL system that fine-tunes mBART (Lewis et al., 2020) to autoregressively generate a Wikidata entity name for a mention in context. As noted by De Cao et al. (2022)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>, mGENRE generates entity names by also copying relevant portions of the input mention; however, copying is not possible when the mention of the entity is in a language for which Wikidata does not feature any names. By increasing the coverage and precision of textual information in Wikidata, M-NTA provides mGENRE with a broader coverage of entity names in non-English languages, aiding mGENRE's capability to rely on copying mechanisms. Indeed, as we can see in Table 3, augmenting mGENRE with M-NTA brings an improvement of 1.2 points in F1 score on average in Wikinews-7, setting a new state-of-the-art on this benchmark.\n",
      "\n",
      "Multilingual Knowledge Graph Completion (MKGC). Another direct application of KGE is MKGC, the task of predicting missing links between two entities in a multilingual knowledge base (Chen et al., 2020a). Similarly to MEL, we evaluate the downstream impact of our work on a re-implementation of Align-KGC (SoftAsym)<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>, a state-of-the-art MKGC system originally proposed by Chakrabarti et al. (2022), which we rebuilt to use our entity names and descriptions to create mBERT-based entity embeddings. As shown in Table 4, using M-NTA to provide more and better entity names and descriptions allows the MKGC system to obtain a consistent improvement across non-English languages on DBP-5L (Chen et al., 2020a), i.e., +1.5 points in terms of Mean Reciprocal Rank (MRR), excluding English. We hypothesize that the larger part of this improvement comes from the fact that the entity descriptions generated by M-NTA are more informative, as suggested by the examples shown in Appendix C.7 (see Table 7)<sup>65{\"chunk_id\":\"11\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>.\n",
      "\n",
      "In summary, our experiments demonstrate that KGE can effectively enhance the performance of state-of-the-art systems in downstream applications such as multilingual entity linking, knowledge graph completion, and question answering. These results underscore the importance of increasing the coverage and precision of textual information in multilingual knowledge graphs for enabling better and more inclusive multilingual applications.\n",
      "\n",
      "## 4 Applications of Knowledge Graphs\n",
      "## 4.1 Recommendation Systems\n",
      "\n",
      "In this subsection, we delve into the application of knowledge graphs in building robust recommendation systems. Knowledge graphs provide a rich source of structured information about entities, which can be leveraged to enhance the performance of recommendation algorithms<sup>69{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}</sup><sup>74{\"chunk_id\":\"1\", \"paper_id\":\"6602894513fb2c6cf6d810ab\"}</sup><sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>23{\"chunk_id\":\"1\", \"paper_id\":\"660253bd13fb2c6cf6088509\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. Firstly, we discuss the use of knowledge graphs for collaborative filtering, where the relationships between users and items are modeled as a graph. By analyzing the user-item interaction graph, we can identify communities of users with similar preferences and recommend items to users based on the preferences of their neighbors<sup>39{\"chunk_id\":\"1\", \"paper_id\":\"644744fb71ac66d2cbf9bc82\"}</sup><sup>27{\"chunk_id\":\"2\", \"paper_id\":\"6504d4033fda6d7f06ca7ddf\"}</sup><sup>56{\"chunk_id\":\"1\", \"paper_id\":\"664568f601d2a3fbfcd97fde\"}</sup><sup>76{\"chunk_id\":\"2\", \"paper_id\":\"62708f625aee126c0fa69413\"}</sup>. Secondly, we explore the application of knowledge graphs in content-based filtering, where the content of items is represented as a graph. By analyzing the content graph, we can identify similar items and recommend them to users based on their past preferences<sup>27{\"chunk_id\":\"2\", \"paper_id\":\"6504d4033fda6d7f06ca7ddf\"}</sup><sup>39{\"chunk_id\":\"1\", \"paper_id\":\"644744fb71ac66d2cbf9bc82\"}</sup><sup>85{\"chunk_id\":\"3\", \"paper_id\":\"620b19c25aee126c0f7e64b0\"}</sup>. Additionally, we present a survey on the use of knowledge graphs in recommendation systems, providing an overview of the different methods and their effectiveness. Techniques such as embedding-based methods, path-based methods, and GNN-based methods have been employed to enrich semantics, though they may not be specifically tailored for reciprocal recommendations. Explainable recommendation is also a key area with diverse styles, yet systems often generate generic explanations without considering the unique needs of each party in a reciprocal recommendation scenario. To address these challenges, various models like SpherE<sup>41{\"chunk_id\":\"3\", \"paper_id\":\"6631a2d501d2a3fbfc8c4a96\"}</sup>, KAERR<sup>40{\"chunk_id\":\"1\", \"paper_id\":\"6602676513fb2c6cf6154a6a\"}</sup>, KRDN<sup>31{\"chunk_id\":\"1\", \"paper_id\":\"6456385bd68f896efacf2377\"}</sup>, and ApeGNN<sup>92{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223890\"}</sup> have been proposed, demonstrating enhanced performance and addressing issues such as data sparsity, cold-start problems, and interaction noise. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into pre-trained language models for recommendation<sup>2{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup><sup>3{\"chunk_id\":\"1\", \"paper_id\":\"6602492813fb2c6cf676d713\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>22{\"chunk_id\":\"1\", \"paper_id\":\"64a63bbad68f896efaec478f\"}</sup><sup>93{\"chunk_id\":\"0\", \"paper_id\":\"6514e2043fda6d7f062dc9a9\"}</sup>. Finally, we analyze the impact of knowledge graphs on the robustness of recommendation systems, showing that the incorporation of knowledge graphs can improve the resistance of recommendation algorithms to adversarial attacks and provide more diverse and personalized recommendations<sup>{\"chunk_id\":\"4\", \"paper\n",
      "## 4.2 Passage Re-ranking\n",
      "\n",
      "In this subsection, we explore the integration of explicit knowledge from knowledge graphs into pre-trained language models for passage re-ranking. Firstly, we discuss the use of knowledge graphs to enhance the representation of passages by incorporating additional semantic information. By representing passages as nodes in a knowledge graph and modeling their relationships with other nodes, we can capture the rich semantic context of passages<sup>57{\"chunk_id\":\"2\", \"paper_id\":\"64e6d5a53fda6d7f0652a3cc\"}</sup> <sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>53{\"chunk_id\":\"0\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup> <sup>32{\"chunk_id\":\"2\", \"paper_id\":\"644744fa71ac66d2cbf9b272\"}</sup> <sup>50{\"chunk_id\":\"4\", \"paper_id\":\"66026b7c13fb2c6cf64c85b9\"}</sup>. This enhanced representation can then be used to re-rank passages in response to a query, improving the relevance of search results.\n",
      "\n",
      "Secondly, we investigate the application of knowledge graphs in passage re-ranking by integrating explicit knowledge into pre-trained language models. Pre-trained language models, such as BERT, capture a wealth of semantic information from large-scale text corpora. These models own a powerful ability on contextual text representation, which is learned through a pre-training step using tasks like Next Sentence Prediction and Masked Language Modeling, and transferred to the downstream NLP tasks. The Transformer network's attention mechanism allows it to learn the contextual relationships between words, resulting in high-quality language feature representations. Multilingual-BERT and XLM-RoBERTa are examples of such models, pre-trained on a vast amount of data in multiple languages, demonstrating the scalability and effectiveness of this approach.\n",
      "\n",
      "By fine-tuning these models on knowledge graph data, we can inject explicit knowledge into their representations, enabling them to better understand the relationships between entities and concepts<sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup> <sup>78{\"chunk_id\":\"0\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}</sup> <sup>71{\"chunk_id\":\"2\", \"paper_id\":\"6407fd3e90e50fcafd27470f\"}</sup> <sup>61{\"chunk_id\":\"4\", \"paper_id\":\"64a78f1fd68f896efa01e96a\"}</sup>. This integration of explicit knowledge can lead to more accurate passage re-ranking, as the models can leverage both the implicit knowledge captured during pre-training and the explicit knowledge provided by the knowledge graph<sup>64{\"chunk_id\":\"1\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup>.\n",
      "\n",
      "Additionally, we present a survey on the use of knowledge graphs in passage re-ranking, providing an overview of the different methods and their effectiveness. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into pre-trained language models for passage re-ranking. Explicit knowledge from knowledge graphs, such as synonyms and domain-specific knowledge, can help bridge the semantic gap between queries and passages, addressing the limitations of implicit knowledge extracted from noisy and heterogeneous data. Despite these challenges, recent studies have shown promising results in enhancing passage re-ranking with knowledge graphs, achieving state-of-the-art performance in both general and domain-specific data<sup>64{\"chunk_id\":\"1\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup><sup>2{\"chunk_id\":\"0\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup>.\n",
      "\n",
      "Finally, we analyze the impact of knowledge graphs on the robustness of passage re-ranking, showing that the incorporation of knowledge graphs can improve the resistance of passage re-ranking algorithms to adversarial attacks and provide more relevant search results<sup>64{\"chunk_id\":\"1\", \"paper_id\":\"626754c85aee126c0fbcdd50\"}</sup>.\n",
      "## 4.3 Knowledge-Grounded Dialogue Systems\n",
      "\n",
      "In this subsection, we investigate the application of knowledge graphs in knowledge-grounded dialogue systems. Knowledge graphs provide a rich source of structured information about entities and concepts, which can be leveraged to enhance the performance of dialogue systems<sup>21{\"chunk_id\":\"0\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>69{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}</sup>. Firstly, we discuss the use of knowledge graphs to ground the dialogue context by linking entities and concepts mentioned in the dialogue to their corresponding nodes in the knowledge graph. This grounding enables the dialogue system to access additional semantic information about the entities and concepts, improving the understanding of the dialogue context.\n",
      "\n",
      "Secondly, we explore the application of knowledge graphs in generating informative and coherent responses in knowledge-grounded dialogue. By incorporating explicit knowledge from the knowledge graph into the dialogue generation process, the system can generate responses that are not only relevant to the dialogue context but also provide additional information and explanations<sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>53{\"chunk_id\":\"0\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>30{\"chunk_id\":\"1\", \"paper_id\":\"62393e7e5aee126c0f125ea2\"}</sup><sup>29{\"chunk_id\":\"1\", \"paper_id\":\"65275736939a5f4082a46c09\"}</sup><sup>82{\"chunk_id\":\"1\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup>. This incorporation of explicit knowledge can lead to more engaging and informative dialogue interactions<sup>48{\"chunk_id\":\"5\", \"paper_id\":\"623a90055aee126c0f36c49f\"}</sup><sup>20{\"chunk_id\":\"5\", \"paper_id\":\"646d863cd68f896efa09f112\"}</sup><sup>70{\"chunk_id\":\"0\", \"paper_id\":\"623a90055aee126c0f36c49f\"}</sup>.\n",
      "\n",
      "Additionally, we present a survey on the use of knowledge graphs in knowledge-grounded dialogue systems, providing an overview of the different methods and their effectiveness. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into dialogue systems. Finally, we analyze the impact of knowledge graphs on the robustness and informativeness of dialogue systems, showing that the incorporation of knowledge graphs can improve the system's ability to handle complex and diverse dialogue scenarios<sup>21{\"chunk_id\":\"0\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>19{\"chunk_id\":\"4\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>46{\"chunk_id\":\"6\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup>.\n",
      "\n",
      "## 5 Efficient Evaluation Methods for Knowledge Graph Completion\n",
      "## 5.1 Persistent Homology\n",
      "\n",
      "Persistent homology is a method from the field of algebraic topology that has been proposed as an efficient alternative for evaluating knowledge graph completion methods<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. It is based on the concept of topological data analysis and can capture the geometry of the manifold on which the representations of entities and relations in a knowledge graph reside<sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup>. This property allows persistent homology to evaluate the quality of knowledge graph completion methods by looking only at a fraction of the data, reducing the quadratic complexity of considering all the data points (KG triples in this case) for ranking. Furthermore, persistent homology is robust to noise, mitigating issues due to the open-world problem in knowledge graphs. Experimental results on standard datasets have shown that the proposed metric is highly correlated with ranking metrics (Hits@N, MR, MRR) and can reduce the evaluation time of a KG completion method from 18 hours (using Hits@10) to 27 seconds (using persistent homology)<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. This makes persistent homology a promising approach for faster evaluation of knowledge graph completion methods, enabling more efficient prototyping and development of KGE systems.\n",
      "\n",
      "### 5.1.1 Theoretical Foundations\n",
      "\n",
      "The theoretical foundations of persistent homology lie in algebraic topology, which studies the properties of shapes and spaces by using algebraic constructs. Persistent homology analyzes the topological structure of data by tracking the appearance and disappearance of holes of various dimensions in the data as it is transformed<sup>9{\"chunk_id\":\"3\", \"paper_id\":\"6566a0f1939a5f408265ae0d\"}</sup> <sup>28{\"chunk_id\":\"2\", \"paper_id\":\"62b288a35aee126c0fbd7b92\"}</sup> <sup>72{\"chunk_id\":\"3\", \"paper_id\":\"5ff44b5b91e01130648dc4f0\"}</sup>. For knowledge graph completion, this means tracking the connectivity patterns between entities and relations as the knowledge graph is completed<sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> <sup>15{\"chunk_id\":\"2\", \"paper_id\":\"634e193f90e50fcafd24e2d7\"}</sup> <sup>91{\"chunk_id\":\"5\", \"paper_id\":\"6209c8265aee126c0f1e81ff\"}</sup> <sup>18{\"chunk_id\":\"2\", \"paper_id\":\"6279c9c65aee126c0fdae32d\"}</sup> <sup>73{\"chunk_id\":\"1\", \"paper_id\":\"622577a75aee126c0f008e2a\"}</sup>. The key idea is to represent the knowledge graph as a simplicial complex, where nodes represent entities, edges represent relations between entities, and higher-dimensional simplices represent more complex interactions. By analyzing the topological structure of this simplicial complex, persistent homology can provide insights into the quality of the knowledge graph completion.\n",
      "\n",
      "### 5.1.2 Implementation and Challenges\n",
      "\n",
      "Implementing persistent homology for knowledge graph completion evaluation requires several steps. First, graphs are constructed from positive and negative triples, with scores calculated by a Knowledge Graph Embedding (KGE) method as edge weights. Next, a filtration process is applied to these graphs, converting them into a lower-dimensional representation using persistent homology. Finally, the distance between the graphs is calculated using the Sliced Wasserstein distance to provide a final metric score. This method, termed Knowledge Persistence ($\\mathcal{KP}$), offers a more efficient evaluation alternative to traditional ranking metrics, reducing computational complexity from $O(|\\mathcal{E}|^2)$ to $O(|\\mathcal{E}|)$ and showing high correlation with ranking metrics (Hits@N, MR, MRR)<sup>12{\"chunk_id\":\"4\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. One challenge is the computational complexity of computing persistent homology, which can be high for large knowledge graphs<sup>80{\"chunk_id\":\"9\", \"paper_id\":\"616e37445244ab9dcbd1ab3d\"}</sup>. Another challenge is the interpretation of the topological features produced by persistent homology, which requires domain-specific knowledge. Despite these challenges, the potential benefits of faster and more efficient evaluation of knowledge graph completion\n",
      "## 5.2 Pre-trained Language Models\n",
      "\n",
      "In recent years, pre-trained language models have gained significant attention in the field of natural language processing<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. These models, such as BERT, GPT-3, and T5, are trained on large amounts of unlabeled text data and have demonstrated remarkable performance on various downstream tasks<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. The use of pre-trained language models in knowledge graph completion (KGC) has also shown promising results, with models like SimKGC outperforming embedding-based methods on several benchmark datasets. Additionally, approaches such as KGTransformer and MGTCA have contributed to advancements in KGC by incorporating graph structure transfer and mixed geometry message functions, respectively<sup>35{\"chunk_id\":\"11\", \"paper_id\":\"6694829701d2a3fbfc86645d\"}</sup>.\n",
      "\n",
      "One of the key advantages of using pre-trained language models for KGC is their ability to model long-range dependencies and capture complex interactions between entities and relations<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. This is particularly useful for knowledge graphs with intricate and multi-hop relationships<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. Moreover, pre-trained language models can be fine-tuned on domain-specific data, allowing them to adapt to the specific characteristics of the knowledge graph and improve performance on downstream tasks<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>.\n",
      "\n",
      "However, the use of pre-trained language models in KGC also comes with challenges. One major challenge is the high computational cost associated with these models, especially for large-scale knowledge graphs. For instance, the complexity of computing cause-effect in a graph via Structural Causal Models (SCM) is exponential to the number of node/edge latent variables, while training a Neural Causal Model (NCM) is in polynomial time, addressing some of the scalability issues<sup>35{\"chunk_id\":\"11\", \"paper_id\":\"6694829701d2a3fbfc86645d\"}</sup>. However, applying such models to large graphs still presents practical scalability issues, which could be mitigated by using multi-threading/processing. Moreover, federated learning approaches face similar challenges, where data and model heterogeneity can lead to increased computational costs and performance degradation<sup>55{\"chunk_id\":\"1\", \"paper_id\":\"6516338d3fda6d7f065e5023\"}</sup>. The rapid generation of high-quality neural representations in neural rendering also comes with risks and costs, such as privacy and security concerns<sup>8{\"chunk_id\":\"5\", \"paper_id\":\"657134c4939a5f4082e4f323\"}</sup>. It is important to report efficiency metrics comprehensively to avoid partial conclusions about model efficiency<sup>83{\"chunk_id\":\"0\", \"paper_id\":\"617771c35244ab9dcbe79d60\"}</sup>, and to consider the computational costs associated with extending transformer-based models for tasks like context retrieval for Named Entity Recognition<sup>66{\"chunk_id\":\"4\", \"paper_id\":\"652dee7a939a5f4082b44782\"}</sup>.\n",
      "\n",
      "Additionally, the interpretability of these models can be limited, making it difficult to understand the reasoning behind their predictions<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>. This issue is compounded by the complexity of real-world datasets and the challenges in evaluating explainable ML methods in application-grounded contexts. For instance, models may confuse visually similar numbers, struggle with high density object processing, or face difficulties in explaining sudden changes in geometric structures. Efforts to enhance interpretability are ongoing, with proposals for learning predictors by composing interpretable queries and the need for more rigorous evaluation approaches that consider real tasks, data, users, and inference strategies<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>.\n",
      "\n",
      "These models can capture latent relationships between entities and relations in the knowledge graph, enabling more accurate prediction of missing links<sup>X{\"chunk_id\":\"\", \"paper_id\":\"\"}</sup>.\n",
      "\n",
      "## 6 Graph Neural Networks\n",
      "## 5.1 Persistent Homology\n",
      "\n",
      "Persistent homology, a method from algebraic topology, has emerged as a promising approach for efficient evaluation of knowledge graph completion methods. This is demonstrated in the paper by Bastos et al. where they introduce Knowledge Persistence ($\\mathcal{K P}$) as a novel method for faster evaluation of Knowledge Graph (KG) completion approaches, which utilizes persistent homology to represent the topology of KG completion methods. The effectiveness of persistent homology in various data analysis tasks, such as shape-analysis and image segmentation, is also highlighted in the works by Turkeš et al. and in the application of noise-aware topological consistency for histopathology images. Additionally, the survey by Dashti et al. and the work on cycle registration in persistent homology further underscore its utility in topological data analysis and supervised learning tasks. This unique capability allows persistent homology to evaluate the quality of knowledge graph completion by examining only a fraction of the data, thus reducing the quadratic complexity associated with ranking all KG triples<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. Moreover, its robustness to noise makes it a suitable metric for addressing the open-world problem in knowledge graphs. Experimental results have demonstrated a high correlation between persistent homology and traditional ranking metrics like Hits@N, MR, and MRR. For instance, the evaluation time for a KG completion method was reduced from 18 hours with Hits@10 to just 27 seconds with persistent homology, showcasing its potential for faster and more efficient evaluation<sup>6{\"chunk_id\":\"0\", \"paper_id\":\"63d9d87390e50fcafd57e32c\"}</sup>. Despite challenges such as computational complexity and the need for domain-specific knowledge to interpret topological features, persistent homology remains a promising area for future research in knowledge graph completion evaluation.\n",
      "\n",
      "### 5.1.1 Theoretical Foundations\n",
      "\n",
      "The theoretical underpinnings of persistent homology lie in algebraic topology, which examines the properties of shapes and spaces using algebraic constructs. In the context of knowledge graph completion, persistent homology analyzes the topological structure of the data by tracking the emergence and vanishing of holes of various dimensions as the knowledge graph is completed. By studying the topological structure of this simplicial complex, persistent homology offers insights into the quality of the knowledge graph completion process.\n",
      "\n",
      "### 5.1.2 Implementation and Challenges\n",
      "\n",
      "Implementing persistent homology for knowledge graph completion evaluation requires several steps: representing the knowledge graph as a simplicial complex, computing topological features using persistent homology, and using these features to evaluate the quality of the knowledge graph completion. The computational complexity of computing persistent homology can be significant for large knowledge graphs, and the interpretation of the topological features produced requires domain-specific knowledge. Persistent homology has emerged as a powerful tool for analyzing the topology of various kinds of real-world data, including images, and it performs well in tasks such as detecting the number of holes, curvature, and convexity from 2D and 3D point clouds. However, its effectiveness also depends on computational resources, training data, and the domain-specific interpretation of the results. For instance, in the context of graph data, methods like Curvature Graph Generative Adversarial Networks (CurvGAN) attempt to learn robust representations that capture the underlying topological properties, highlighting the need for specialized approaches to handle the complexity and heterogeneity of the data<sup>9{\"chunk_id\":\"3\", \"paper_id\":\"6566a0f1939a5f408265ae0d\"}</sup><sup>95{\"chunk_id\":\"0\", \"paper_id\":\"62b288a35aee126c0fbd7b92\"}</sup><sup>36{\"chunk_id\":\"1\", \"paper_id\":\"6221834e5aee126c0f23c2cc\"}</sup>.\n",
      "\n",
      "### 5.1.3 Comparison with Ranking Metrics\n",
      "\n",
      "While persistent homology shows promise as an alternative evaluation metric for knowledge graph completion, it is important to compare its performance with traditional ranking metrics. Experimental results have shown that persistent homology is highly correlated with ranking metrics such as Hits@N, MR, and MRR, suggesting it can provide a reliable measure of the quality of knowledge graph completion methods. Therefore, it may be beneficial to combine persistent homology with other evaluation metrics to obtain a more comprehensive assessment of knowledge graph completion methods.\n",
      "\n",
      "In conclusion, persistent homology is a promising approach for faster and more efficient evaluation of knowledge graph completion methods. Its ability to capture the topological structure of knowledge graphs and its robustness to noise make it a valuable tool for researchers and practitioners in the field of knowledge graph completion.\n",
      "## 6.2 Graph vs. Sequence Representations for Knowledge-Grounded Dialogue\n",
      "\n",
      "In this subsection, we compare the performance of graph and sequence representations for knowledge-grounded dialogue. Knowledge graphs provide a rich source of structured information about entities and concepts, which can be leveraged to enhance the performance of dialogue systems<sup>21{\"chunk_id\":\"0\", \"paper_id\":\"6576d783939a5f408200f903\"}</sup><sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>79{\"chunk_id\":\"1\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>24{\"chunk_id\":\"2\", \"paper_id\":\"649d037cd68f896efa4567aa\"}</sup><sup>69{\"chunk_id\":\"1\", \"paper_id\":\"6449f232582c1376bb223931\"}</sup>. Graph representations can capture the complex relationships and interactions between entities and concepts, while sequence representations can model the temporal dynamics and coherence of dialogue<sup>62{\"chunk_id\":\"3\", \"paper_id\":\"6228477c5aee126c0f0c2072\"}</sup><sup>4{\"chunk_id\":\"13\", \"paper_id\":\"64fa84403fda6d7f06700712\"}</sup><sup>42{\"chunk_id\":\"1\", \"paper_id\":\"61a6e8995244ab9dcb50f4ed\"}</sup><sup>38{\"chunk_id\":\"4\", \"paper_id\":\"6449f232582c1376bb223932\"}</sup><sup>94{\"chunk_id\":\"5\", \"paper_id\":\"65ee659613fb2c6cf61acd26\"}</sup>.\n",
      "\n",
      "We investigate the use of graph neural networks (GNNs) for temporal reasoning in knowledge graphs. GNNs are a type of neural network designed to process graph-structured data. By aggregating information from neighboring nodes, GNNs can capture the local and global structure of graphs<sup>44{\"chunk_id\":\"1\", \"paper_id\":\"62df81d45aee126c0f872671\"}</sup><sup>47{\"chunk_id\":\"1\", \"paper_id\":\"633f98d290e50fcafd78de82\"}</sup><sup>63{\"chunk_id\":\"2\", \"paper_id\":\"63f2e4aa90e50fcafd282094\"}</sup><sup>67{\"chunk_id\":\"2\", \"paper_id\":\"65fb94f713fb2c6cf682ffd9\"}</sup><sup>58{\"chunk_id\":\"2\", \"paper_id\":\"61e8d30e5244ab9dcb5832ce\"}</sup>. In the context of knowledge graphs, GNNs can model the temporal evolution of entities and relationships, enabling more accurate prediction of future events and states<sup>49{\"chunk_id\":\"1\", \"paper_id\":\"63dcdb422c26941cf00b64a4\"}</sup><sup>84{\"chunk_id\":\"1\", \"paper_id\":\"633a52a190e50fcafd688ec1\"}</sup><sup>5{\"chunk_id\":\"1\", \"paper_id\":\"6448967571ac66d2cbd87664\"}</sup><sup>88{\"chunk_id\":\"1\", \"paper_id\":\"64af9a093fda6d7f065a6ec4\"}</sup>.\n",
      "\n",
      "We also explore the application of graph and sequence representations in knowledge-grounded dialogue. Graph representations can incorporate explicit knowledge from the knowledge graph into the dialogue generation process, allowing the system to generate responses that are not only relevant to the dialogue context but also provide additional information and explanations. On the other hand, sequence representations can model the temporal dynamics and coherence of dialogue, ensuring that the generated responses are contextually appropriate and informative<sup>7{\"chunk_id\":\"1\", \"paper_id\":\"634e194790e50fcafd24f231\"}</sup><sup>13{\"chunk_id\":\"2\", \"paper_id\":\"6397ed4e90e50fcafdf440b9\"}</sup><sup>37{\"chunk_id\":\"5\", \"paper_id\":\"64e433243fda6d7f06010570\"}</sup><sup>1{\"chunk_id\":\"1\", \"paper_id\":\"640166a490e50fcafd68b2f5\"}</sup>.\n",
      "\n",
      "We present a survey on the use of graph and sequence representations in knowledge-grounded dialogue, providing an overview of the different methods and their effectiveness. Furthermore, we discuss the challenges and opportunities in integrating explicit knowledge from knowledge graphs into dialogue systems. Finally, we analyze the impact of\n",
      "## 7 Conclusion \n",
      "This survey delves into the technology development roadmap for knowledge graph enhancement, focusing on the impact of enhancing textual information in knowledge graphs on downstream tasks such as Question Answering, Text Summarization, Entity Linking, and Word Sense Disambiguation<sup>81{\"chunk_id\":\"1\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. It showcases the effectiveness of knowledge graph enhancement (KGE) in improving the performance of state-of-the-art techniques in multilingual entity linking and knowledge graph completion<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. The survey also highlights the benefits of KGE for multilingual question answering<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup> and its impact on multilingual entity linking and knowledge graph completion<sup>14{\"chunk_id\":\"5\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>. Additionally, the study on Prix-LM demonstrates its effectiveness in capturing, propagating, and enriching knowledge in multilingual knowledge bases<sup>17{\"chunk_id\":\"5\", \"paper_id\":\"616e37435244ab9dcbd1a751\"}</sup>.\n",
      "\n",
      "Despite these advancements, the survey underscores the existing limitations and gaps in textual information coverage and precision between English and non-English languages in knowledge graphs. Supplemented with citations, this issue is addressed through the introduction of the task of Knowledge Graph Enhancement (KGE) and the development of a benchmark called WikiKGE-10 which evaluates KGE systems for entity names in 10 typologically diverse languages. The survey highlights the challenges in coverage and precision, with coverage being limited due to the availability of textual information in non-English languages, and precision being compromised by human errors, stale entries, and under-specific information. To bridge this gap, the paper proposes M-NTA, an unsupervised approach combining Machine Translation, Web Search, and Large Language Models, demonstrating its impact on downstream tasks like Entity Linking, Knowledge Graph Completion, and Question Answering<sup>90{\"chunk_id\":\"2\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>54{\"chunk_id\":\"3\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup><sup>75{\"chunk_id\":\"0\", \"paper_id\":\"65655a25939a5f4082bae77e\"}</sup>.\n",
      "\n",
      "It emphasizes the need for more extensive investigations to bridge this gap and achieve parity across languages. Furthermore, the survey discusses the limitations of current KGE approaches, such as the focus on specific types of textual information and the primary attention on Wikidata. It also highlights the need for future work to investigate the extent of the problem on other knowledge graphs and to create benchmarks for different types of textual information.\n",
      "\n",
      "In conclusion, the survey provides insights into the current state and future directions of KGE research, aiming to enhance the coverage and precision of textual information in multilingual knowledge graphs.\n",
      "\n",
      "# References\n",
      "\n",
      "[1] CTRLStruct: Dialogue Structure Learning for Open-Domain Response Generation WWW_2023_ chunk_1 id:640166a490e50fcafd68b2f5\n",
      "\n",
      "[2] Incorporating Explicit Knowledge in Pre-trained Language Models for Passage Re-ranking SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_0 id:626754c85aee126c0fbcdd50\n",
      "\n",
      "[3] LLMRG: Improving Recommendations Through Large Language Model Reasoning Graphs AAAI2024 chunk_1 id:6602492813fb2c6cf676d713\n",
      "\n",
      "[4] Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning Artificial_Intelligence chunk_13 id:64fa84403fda6d7f06700712\n",
      "\n",
      "[5] Adaptive Path-Memory Network for Temporal Knowledge Graph Reasoning IJCAI2023 chunk_1 id:6448967571ac66d2cbd87664\n",
      "\n",
      "[6] Can Persistent Homology Provide an Efficient Alternative for Evaluation of Knowledge Graph Completion Methods? WWW_2023_ chunk_0 id:63d9d87390e50fcafd57e32c\n",
      "\n",
      "[7] Supervised Prototypical Contrastive Learning for Emotion Recognition in Conversation EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing chunk_1 id:634e194790e50fcafd24f231\n",
      "\n",
      "[8] HybridNeRF: Efficient Neural Rendering Via Adaptive Volumetric Surfaces CVPR2024 chunk_5 id:657134c4939a5f4082e4f323\n",
      "\n",
      "[9] Semi-supervised Segmentation of Histopathology Images with Noise-Aware Topological Consistency ECCV2024 chunk_3 id:6566a0f1939a5f408265ae0d\n",
      "\n",
      "[10] NativE: Multi-modal Knowledge Graph Completion in the Wild SIGIR2024 chunk_5 id:6614c27c13fb2c6cf65095af\n",
      "\n",
      "[11] Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_1 id:627483fa5aee126c0f07e07a\n",
      "\n",
      "[12] Can Persistent Homology Provide an Efficient Alternative for Evaluation of Knowledge Graph Completion Methods? WWW_2023_ chunk_4 id:63d9d87390e50fcafd57e32c\n",
      "\n",
      "[13] CDialog: A Multi-turn Covid-19 Conversation Dataset for Entity-Aware Dialog Generation EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing chunk_2 id:6397ed4e90e50fcafdf440b9\n",
      "\n",
      "[14] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_5 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[15] Inductive Logical Query Answering in Knowledge Graphs. NeurIPS_2022_Neural_Information_Processing_Systems chunk_2 id:634e193f90e50fcafd24e2d7\n",
      "\n",
      "[16] Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_6 id:627483fa5aee126c0f07e07a\n",
      "\n",
      "[17] Prix-LM: Pretraining for Multilingual Knowledge Base Construction ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_5 id:616e37435244ab9dcbd1a751\n",
      "\n",
      "[18] Re-thinking Knowledge Graph Completion Evaluation from an Information Retrieval Perspective. SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_2 id:6279c9c65aee126c0fdae32d\n",
      "\n",
      "[19] Graph Vs. Sequence: an Empirical Study on Knowledge Forms for Knowledge-Grounded Dialogue EMNLP_2023 chunk_4 id:6576d783939a5f408200f903\n",
      "\n",
      "[20] ReSee: Responding Through Seeing Fine-grained Visual Knowledge in Open-domain Dialogue EMNLP_2023 chunk_5 id:646d863cd68f896efa09f112\n",
      "\n",
      "[21] Graph Vs. Sequence: an Empirical Study on Knowledge Forms for Knowledge-Grounded Dialogue EMNLP_2023 chunk_0 id:6576d783939a5f408200f903\n",
      "\n",
      "[22] All in One: Multi-Task Prompting for Graph Neural Networks KDD2023 chunk_1 id:64a63bbad68f896efaec478f\n",
      "\n",
      "[23] Bayesian Inference with Complex Knowledge Graph Evidence AAAI2024 chunk_1 id:660253bd13fb2c6cf6088509\n",
      "\n",
      "[24] Enhancing Dialogue Generation via Dynamic Graph Knowledge Aggregation ACL_2023 chunk_2 id:649d037cd68f896efa4567aa\n",
      "\n",
      "[25] MetaCLUE: Towards Comprehensive Visual Metaphors Research CVPR_2023 chunk_5 id:63a2794890e50fcafd293f21\n",
      "\n",
      "[26] Explaining with Contrastive Phrasal Highlighting: A Case Study in Assisting Humans to Detect Translation Differences EMNLP_2023 chunk_5 id:656e8e1a939a5f408286c92f\n",
      "\n",
      "[27] MM-FRec: Multi-Modal Enhanced Fashion Item Recommendation IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_2 id:6504d4033fda6d7f06ca7ddf\n",
      "\n",
      "[28] On the Effectiveness of Persistent Homology. NeurIPS_2022_Neural_Information_Processing_Systems chunk_2 id:62b288a35aee126c0fbd7b92\n",
      "\n",
      "[29] Well Begun is Half Done: Generator-agnostic Knowledge Pre-Selection for Knowledge-Grounded Dialogue EMNLP_2023 chunk_1 id:65275736939a5f4082a46c09\n",
      "\n",
      "[30] Towards Large-Scale Interpretable Knowledge Graph Reasoning for Dialogue Systems ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_1 id:62393e7e5aee126c0f125ea2\n",
      "\n",
      "[31] Knowledge-refined Denoising Network for Robust Recommendation. SIGIR2023 chunk_1 id:6456385bd68f896efacf2377\n",
      "\n",
      "[32] Detecting Spoilers in Movie Reviews with External Movie Knowledge and User Networks EMNLP_2023 chunk_2 id:644744fa71ac66d2cbf9b272\n",
      "\n",
      "[33] Attention Prompting on Image for Large Vision-Language Models ECCV2024 chunk_9 id:66f4cd3401d2a3fbfcbfac37\n",
      "\n",
      "[34] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_0 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[35] Graph Neural Network Causal Explanation Via Neural Causal Models ECCV2024 chunk_11 id:6694829701d2a3fbfc86645d\n",
      "\n",
      "[36] Curvature Graph Generative Adversarial Networks WWW_2022_The_Web_Conference chunk_1 id:6221834e5aee126c0f23c2cc\n",
      "\n",
      "[37] Can Language Models Learn to Listen? ICCV_2023 chunk_5 id:64e433243fda6d7f06010570\n",
      "\n",
      "[38] Learning Long- and Short-term Representations for Temporal Knowledge Graph Reasoning WWW_2023_ chunk_4 id:6449f232582c1376bb223932\n",
      "\n",
      "[39] Attention-guided Multi-step Fusion: A Hierarchical Fusion Network for Multimodal Recommendation. SIGIR2023 chunk_1 id:644744fb71ac66d2cbf9bc82\n",
      "\n",
      "[40] Knowledge-Aware Explainable Reciprocal Recommendation AAAI2024 chunk_1 id:6602676513fb2c6cf6154a6a\n",
      "\n",
      "[41] SpherE: Expressive and Interpretable Knowledge Graph Embedding for Set Retrieval SIGIR2024 chunk_3 id:6631a2d501d2a3fbfc8c4a96\n",
      "\n",
      "[42] AirObject: A Temporally Evolving Graph Embedding for Object Identification CVPR_2022_IEEE_Conference_on_Computer_Vision_and_Pattern_Recognition chunk_1 id:61a6e8995244ab9dcb50f4ed\n",
      "\n",
      "[43] Beyond Shared Vocabulary: Increasing Representational Word Similarities Across Languages for Multilingual Machine Translation EMNLP_2023 chunk_4 id:646d8642d68f896efa0a2fc8\n",
      "\n",
      "[44] Generative Subgraph Contrast for Self-Supervised Graph Representation Learning ECCV_2022_European_Conference_on_Computer_Vision chunk_1 id:62df81d45aee126c0f872671\n",
      "\n",
      "[45] Language Representation Projection: Can We Transfer Factual Knowledge Across Languages in Multilingual Language Models? EMNLP_2023 chunk_2 id:654af002939a5f40823c8316\n",
      "\n",
      "[46] Enhancing Dialogue Generation via Dynamic Graph Knowledge Aggregation ACL_2023 chunk_6 id:649d037cd68f896efa4567aa\n",
      "\n",
      "[47] Geodesic Graph Neural Network for Efficient Graph Representation Learning. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1 id:633f98d290e50fcafd78de82\n",
      "\n",
      "[48] Achieving Conversational Goals with Unsupervised Post-hoc Knowledge Injection. ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_5 id:623a90055aee126c0f36c49f\n",
      "\n",
      "[49] A Differential Geometric View and Explainability of GNN on Evolving Graphs. ICLR_2023 chunk_1 id:63dcdb422c26941cf00b64a4\n",
      "\n",
      "[50] Augmented Commonsense Knowledge for Remote Object Grounding AAAI2024 chunk_4 id:66026b7c13fb2c6cf64c85b9\n",
      "\n",
      "[51] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_16 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[52] A Multi-task Multi-stage Transitional Training Framework for Neural Chat   Translation IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_0 id:63d7352390e50fcafda3044b\n",
      "\n",
      "[53] Enhancing Dialogue Generation via Dynamic Graph Knowledge Aggregation ACL_2023 chunk_0 id:649d037cd68f896efa4567aa\n",
      "\n",
      "[54] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_3 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[55] Generalizable Heterogeneous Federated Cross-Correlation and Instance Similarity Learning IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_1 id:6516338d3fda6d7f065e5023\n",
      "\n",
      "[56] Exploring the Individuality and Collectivity of Intents Behind Interactions for Graph Collaborative Filtering SIGIR2024 chunk_1 id:664568f601d2a3fbfcd97fde\n",
      "\n",
      "[57] Knowledge Graph Prompting for Multi-Document Question Answering AAAI2024 chunk_2 id:64e6d5a53fda6d7f0652a3cc\n",
      "\n",
      "[58] Adversarial Contrastive Learning for Evidence-aware Fake News Detection with Graph Neural Networks. WWW_2022_The_Web_Conference chunk_2 id:61e8d30e5244ab9dcb5832ce\n",
      "\n",
      "[59] UMIE: Unified Multimodal Information Extraction with Instruction Tuning AAAI2024 chunk_1 id:659cf440939a5f4082bb9afb\n",
      "\n",
      "[60] Information Screening Whilst Exploiting! Multimodal Relation Extraction with Feature Denoising and Multimodal Topic Modeling ACL_2023 chunk_1 id:646aecaad68f896efa05a77c\n",
      "\n",
      "[61] Knowledge Graph Self-Supervised Rationalization for Recommendation KDD2023 chunk_4 id:64a78f1fd68f896efa01e96a\n",
      "\n",
      "[62] Understanding the Long-Term Dynamics of Mobile App Usage Context via Graph Embedding IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_3 id:6228477c5aee126c0f0c2072\n",
      "\n",
      "[63] Search to Capture Long-range Dependency with Stacking GNNs for Graph Classification. WWW_2023_ chunk_2 id:63f2e4aa90e50fcafd282094\n",
      "\n",
      "[64] Incorporating Explicit Knowledge in Pre-trained Language Models for Passage Re-ranking SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_1 id:626754c85aee126c0fbcdd50\n",
      "\n",
      "[65] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_11 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[66] Learning to Rank Context for Named Entity Recognition Using a Synthetic Dataset EMNLP_2023 chunk_4 id:652dee7a939a5f4082b44782\n",
      "\n",
      "[67] Building Optimal Neural Architectures Using Interpretable Knowledge CVPR2024 chunk_2 id:65fb94f713fb2c6cf682ffd9\n",
      "\n",
      "[68] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_8 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[69] Wikidata As a Seed for Web Extraction WWW_2023_ chunk_1 id:6449f232582c1376bb223931\n",
      "\n",
      "[70] Achieving Conversational Goals with Unsupervised Post-hoc Knowledge Injection. ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_0 id:623a90055aee126c0f36c49f\n",
      "\n",
      "[71] Structure Pre-training and Prompt Tuning for Knowledge Graph Transfer WWW_2023_ chunk_2 id:6407fd3e90e50fcafd27470f\n",
      "\n",
      "[72] Cycle Registration in Persistent Homology with Applications in Topological Bootstrap IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence chunk_3 id:5ff44b5b91e01130648dc4f0\n",
      "\n",
      "[73] SimKGC: Simple Contrastive Knowledge Graph Completion with Pre-trained Language Models ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics chunk_1 id:622577a75aee126c0f008e2a\n",
      "\n",
      "[74] KGDM: A Diffusion Model to Capture Multiple Relation Semantics for Knowledge Graph Embedding AAAI2024 chunk_1 id:6602894513fb2c6cf6d810ab\n",
      "\n",
      "[75] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_0 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[76] Knowledge Graph Contrastive Learning for Recommendation SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_2 id:62708f625aee126c0fa69413\n",
      "\n",
      "[77] Systematic Word Meta-Sense Extension EMNLP_2023 chunk_0 id:655ebe83939a5f4082bb6cd0\n",
      "\n",
      "[78] Inductive Logical Query Answering in Knowledge Graphs. NeurIPS_2022_Neural_Information_Processing_Systems chunk_0 id:634e193f90e50fcafd24e2d7\n",
      "\n",
      "[79] Temporal Inductive Path Neural Network for Temporal Knowledge Graph Reasoning Artificial_Intelligence chunk_1 id:64fa84403fda6d7f06700712\n",
      "\n",
      "[80] Topologically Regularized Data Embeddings. ICLR_2022_International_Conference_on_Learning_Representation chunk_9 id:616e37445244ab9dcbd1ab3d\n",
      "\n",
      "[81] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_1 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[82] Graph Vs. Sequence: an Empirical Study on Knowledge Forms for Knowledge-Grounded Dialogue EMNLP_2023 chunk_1 id:6576d783939a5f408200f903\n",
      "\n",
      "[83] The Efficiency Misnomer ICLR_2022_International_Conference_on_Learning_Representation chunk_0 id:617771c35244ab9dcbe79d60\n",
      "\n",
      "[84] Provably Expressive Temporal Graph Networks. NeurIPS_2022_Neural_Information_Processing_Systems chunk_1 id:633a52a190e50fcafd688ec1\n",
      "\n",
      "[85] Modeling User Behavior with Graph Convolution for Personalized Product Search WWW_2022_The_Web_Conference chunk_3 id:620b19c25aee126c0f7e64b0\n",
      "\n",
      "[86] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_15 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[87] Multi-Modal Knowledge Graph Construction and Application: A Survey IEEE_Transactions_on_Knowledge_and_Data_Engineering chunk_16 id:6209c8295aee126c0f1e86c0\n",
      "\n",
      "[88] WinGNN: Dynamic Graph Neural Networks with Random Gradient Aggregation Window KDD2023 chunk_1 id:64af9a093fda6d7f065a6ec4\n",
      "\n",
      "[89] Hybrid Transformer with Multi-level Fusion for Multimodal Knowledge Graph Completion SIGIR_2022_Special_Interest_Group_on_Information_Retrieval chunk_1 id:627483fa5aee126c0f07e07a\n",
      "\n",
      "[90] Increasing Coverage and Precision of Textual Information in Multilingual Knowledge Graphs EMNLP_2023 chunk_2 id:65655a25939a5f4082bae77e\n",
      "\n",
      "[91] Rethinking Graph Convolutional Networks in Knowledge Graph Completion WWW_2022_The_Web_Conference chunk_5 id:6209c8265aee126c0f1e81ff\n",
      "\n",
      "[92] ApeGNN: Node-Wise Adaptive Aggregation in GNNs for Recommendation WWW_2023_ chunk_1 id:6449f232582c1376bb223890\n",
      "\n",
      "[93] Graph Neural Prompting with Large Language Models AAAI2024 chunk_0 id:6514e2043fda6d7f062dc9a9\n",
      "\n",
      "[94] Towards Scene Graph Anticipation ECCV2024 chunk_5 id:65ee659613fb2c6cf61acd26\n",
      "\n",
      "[95] On the Effectiveness of Persistent Homology. NeurIPS_2022_Neural_Information_Processing_Systems chunk_0 id:62b288a35aee126c0fbd7b92\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(survey_with_references)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
