{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "notebookRunGroups": {
     "groupValue": "2"
    }
   },
   "outputs": [],
   "source": [
    "import asyncio\n",
    "import re\n",
    "import nest_asyncio\n",
    "nest_asyncio.apply()\n",
    "import sys\n",
    "import os\n",
    "\n",
    "# 添加项目根目录到Python路径\n",
    "sys.path.append(os.path.abspath('../..'))  # 修改这行，向上追溯三层目录到项目根目录\n",
    "\n",
    "\n",
    "topic = \"what does the technological roadmap of information extraction technology look like?\"\n",
    "with open(r\"D:\\GoodStudy\\FX15_research_agent\\summary-generation-match\\research_agent\\scripts\\1 copy.md\", \"r\", encoding=\"utf-8\") as file:\n",
    "    content = file.read()\n",
    "import re\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[WinError 3] 系统找不到指定的路径。: 'draft_iteration_output_dir\\\\20250305_181238'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[2], line 5\u001b[0m\n\u001b[0;32m      2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mdatetime\u001b[39;00m\n\u001b[0;32m      4\u001b[0m \u001b[38;5;66;03m# 创建pipeline实例\u001b[39;00m\n\u001b[1;32m----> 5\u001b[0m pipeline \u001b[38;5;241m=\u001b[39m \u001b[43mCitationPipeline\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m      6\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtopic\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtopic\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      7\u001b[0m \u001b[43m    \u001b[49m\u001b[43mcontent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcontent\u001b[49m\n\u001b[0;32m      8\u001b[0m \u001b[43m)\u001b[49m\n",
      "File \u001b[1;32md:\\GoodStudy\\FX15_research_agent\\summary-generation-match\\research_agent\\core\\pipeline_reference.py:59\u001b[0m, in \u001b[0;36mCitationPipeline.__init__\u001b[1;34m(self, topic, content, min_length, max_length, draft_iteration_output_dir)\u001b[0m\n\u001b[0;32m     57\u001b[0m \u001b[38;5;66;03m# 确保在使用之前定义 draft_iteration_output_dir\u001b[39;00m\n\u001b[0;32m     58\u001b[0m draft_iteration_output_dir \u001b[38;5;241m=\u001b[39m Path(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdraft_iteration_output_dir/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtimestamp\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m---> 59\u001b[0m \u001b[43mdraft_iteration_output_dir\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmkdir\u001b[49m\u001b[43m(\u001b[49m\u001b[43mexist_ok\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n\u001b[0;32m     60\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdraft_iteration_output_dir \u001b[38;5;241m=\u001b[39m draft_iteration_output_dir\n",
      "File \u001b[1;32md:\\nlpD\\envs\\pytorch\\lib\\pathlib.py:1323\u001b[0m, in \u001b[0;36mPath.mkdir\u001b[1;34m(self, mode, parents, exist_ok)\u001b[0m\n\u001b[0;32m   1319\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m   1320\u001b[0m \u001b[38;5;124;03mCreate a new directory at this given path.\u001b[39;00m\n\u001b[0;32m   1321\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m   1322\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m-> 1323\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_accessor\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmkdir\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1324\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mFileNotFoundError\u001b[39;00m:\n\u001b[0;32m   1325\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m parents \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparent \u001b[38;5;241m==\u001b[39m \u001b[38;5;28mself\u001b[39m:\n",
      "\u001b[1;31mFileNotFoundError\u001b[0m: [WinError 3] 系统找不到指定的路径。: 'draft_iteration_output_dir\\\\20250305_181238'"
     ]
    }
   ],
   "source": [
    "from research_agent.core.pipeline_reference import CitationPipeline\n",
    "import datetime\n",
    "\n",
    "# 创建pipeline实例\n",
    "pipeline = CitationPipeline(\n",
    "    topic=topic,\n",
    "    content=content\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "final_survey = await pipeline.pipeline_reference()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import List, Tuple\n",
    "\n",
    "\n",
    "def update_sections(self, merged_sections: List[str], results: List[List[Tuple[int, str]]]) -> str:\n",
    "    \"\"\"\n",
    "    更新段落中的句子内容。\n",
    "\n",
    "    Args:\n",
    "        merged_sections (List[str]): 合并后的段落列表。\n",
    "        results (List[List[Tuple[int, str]]]): 每个段落对应的更新结果，包含句子索引和更新内容。\n",
    "\n",
    "    Returns:\n",
    "        str: 更新后的新段落内容。\n",
    "    \"\"\"\n",
    "    new_section = []\n",
    "\n",
    "    for i, section in enumerate(merged_sections):\n",
    "        update_section = section\n",
    "        sentences = sent_tokenize(update_section)\n",
    "\n",
    "        # 遍历当前 section 对应的结果\n",
    "        for result in results[i]:\n",
    "            update_section = re.sub(re.escape(sentences[result[0]]),lambda m: result[1],update_section)\n",
    "\n",
    "        new_section.append(update_section)\n",
    "\n",
    "    # 返回更新后的新段落\n",
    "    return \"\".join(new_section)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\\\(2021\\\\)\\\\ performed\\\\ structured\\\\ pruning\\\\ with\\\\ \\\\$\\\\\\\\ell_\\\\{1\\\\}\\\\$\\\\ sparse\\\\ regularization'"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "re.escape(\"(2021) performed structured pruning with $\\ell_{1}$ sparse regularization\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\(2021\\)\\ performed\\ structured\\ \\\n",
      "pruning\\ with\\ \\$\\\\ell_\\{1\\}\\$\\ sparse\\ regularization<///111>\n"
     ]
    }
   ],
   "source": [
    "print(re.escape(\"(2021) performed structured \\npruning with $\\ell_{1}$ sparse regularization<///111>\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Pruning has been successfully applied to compress language models, including BERT. McCarley et al. (2019) proposed pruning attention heads with less contribution. Wang et al. (2020) used low-rank factorization and $\\\\ell_{0}$ regularization for pruning. Sanh et al. (2020) introduced an improved magnitude pruning for transfer learning, and Chen et al. (2021) performed structured pruning with $\\\\ell_{1}$ sparse regularization, reducing parameters and training cost.'"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import re\n",
    "re.sub(\n",
    "    re.escape(\"(2021) performed \\t structured \\npruning with $\\ell_{1}$ sparse regularization\"),\n",
    "    lambda m: \"(2021) performed structured pruning with $\\ell_{1}$ sparse regularization<///111>\",\n",
    "    \"Pruning has been successfully applied to compress language models, including BERT. McCarley et al. (2019) proposed pruning attention heads with less contribution. Wang et al. (2020) used low-rank factorization and $\\ell_{0}$ regularization for pruning. Sanh et al. (2020) introduced an improved magnitude pruning for transfer learning, and Chen et al. (2021) performed structured pruning with $\\ell_{1}$ sparse regularization, reducing parameters and training cost.\"\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将结果存储为时间+名称.md\n",
    "timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
    "filename = f\"{timestamp}_final_survey.md\"\n",
    "with open(filename, \"w\", encoding=\"utf-8\") as f:\n",
    "    f.write(final_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from research_agent.core.pipeline_reference import CitationPipeline\n",
    "citation_pipeline = CitationPipeline(\n",
    "    topic=topic,\n",
    "    content=content\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:asyncio:开始处理引用pipeline\n",
      "INFO:asyncio:已合并段落\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "19"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from asyncio.log import logger\n",
    "\n",
    "\n",
    "logger.info(\"开始处理引用pipeline\")\n",
    "pipeline.split_by_primary_headers()\n",
    "pipeline.merge_sections()\n",
    "logger.info(\"已合并段落\")\n",
    "len(pipeline.merged_sections)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:asyncio:已完成find_statement\n"
     ]
    }
   ],
   "source": [
    "\n",
    "await pipeline.find_statement_citations()\n",
    "logger.info(\"已完成find_statement\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:asyncio:已完成prepare_draft_info\n"
     ]
    }
   ],
   "source": [
    "\n",
    "await pipeline.prepare_draft_info()\n",
    "logger.info(\"已完成prepare_draft_info\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "'CitationPipeline' object has no attribute 'config'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[8], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43mpipeline\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241m.\u001b[39mRERANK_API_KEY \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124ma4e980e3397543dfa045ea213026d227.tS2CEMLUqPmXdIqu\u001b[39m\u001b[38;5;124m\"\u001b[39m\n",
      "\u001b[1;31mAttributeError\u001b[0m: 'CitationPipeline' object has no attribute 'config'"
     ]
    }
   ],
   "source": [
    "pipeline.config.RERANK_API_KEY = \"a4e980e3397543dfa045ea213026d227.tS2CEMLUqPmXdIqu\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'a4e980e3397543dfa045ea213026d227.tS2CEMLUqPmXdIqu'"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pipeline.processor.embedding_model.configs.RERANK_API_KEY = \"a4e980e3397543dfa045ea213026d227.tS2CEMLUqPmXdIqu\"\n",
    "pipeline.processor.embedding_model.configs.RERANK_API_KEY"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "results = await pipeline.process_drafts()\n",
    "logger.info(\"已完成process_drafts\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "new_content = self.update_sections(self.merged_sections, results)\n",
    "logger.info(\"已完成update_sections\")\n",
    "\n",
    "new_content, reference_list = self.replace_citations_with_num(\n",
    "    new_content)\n",
    "logger.info(\"已完成replace_citations_with_num\")\n",
    "\n",
    "final_survey = new_content + \"\\n\\n\" + \\\n",
    "    \"# References\\n\\n\" + \"\\n\".join(reference_list)\n",
    "final_survey = self.process_final_survey(final_survey)\n",
    "logger.info(\"已完成process_final_survey\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:asyncio:已完成find_statement\n"
     ]
    }
   ],
   "source": [
    "await pipeline.find_statement_citations()\n",
    "logger.info(\"已完成find_statement\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python3\n",
    "# -*- coding: utf-8 -*-\n",
    "\"\"\"\n",
    "该模块通过调用 LLM 模型，根据给定的研究主题和文本内容查找相关的语句引用，\n",
    "并支持并发处理文本段。所有功能均封装在 CitationProcessor 类中。\n",
    "\"\"\"\n",
    "\n",
    "import asyncio\n",
    "import logging\n",
    "from asyncio import Semaphore\n",
    "from pathlib import Path\n",
    "from typing import List, Optional\n",
    "\n",
    "from jinja2 import Environment\n",
    "from pyaml_env import parse_config\n",
    "import json_repair\n",
    "\n",
    "from research_agent.core.config import Config\n",
    "from research_agent.core.general_llm import LLM\n",
    "from research_agent.core.query import Query\n",
    "from research_agent.core.utils import tokenize_sentences\n",
    "\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "\n",
    "class FindStatementCitation:\n",
    "    \"\"\"\n",
    "    通过调用 LLM 模型，根据给定的研究主题和文本内容查找相关的语句引用，\n",
    "    并提供对文本段并发处理的功能。\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, base_path: Optional[str] = None):\n",
    "        \"\"\"\n",
    "        初始化 FindStatementCitation 类，加载 LLM 模型配置和提示模板，并设置并发控制信号量。\n",
    "\n",
    "        :param base_path: 提示模板所在的基础路径，默认为当前文件所在目录下的 \"prompts\" 文件夹\n",
    "        \"\"\"\n",
    "        # 加载配置文件并获取默认模型配置\n",
    "        configs = parse_config(Config.YAML_CONFIG)\n",
    "        self.llm = LLM(config=configs[Config.DEFAULT_MODEL])\n",
    "        self.query = Query()\n",
    "\n",
    "        prompt_file = r\"D:\\GoodStudy\\FX15_reference_3\\summary-generation-match\\research_agent\\core\\prompts\\1.jinja\"\n",
    "        try:\n",
    "            with open(prompt_file, \"r\", encoding=\"utf-8\") as f:\n",
    "                template_content = f.read()\n",
    "        except Exception as e:\n",
    "            logger.error(f\"加载提示模板文件失败：{prompt_file}，错误信息：{e}\")\n",
    "            raise\n",
    "\n",
    "        # 使用 Jinja2 加载模板\n",
    "        self.prompt_template = Environment().from_string(template_content)\n",
    "\n",
    "    def _prepare_prompt_messages(self, topic: str, section: str) -> List[dict]:\n",
    "        \"\"\"\n",
    "        准备生成查找语句引用所需的提示消息。\n",
    "\n",
    "        :param topic: 研究主题\n",
    "        :param section: 文本内容\n",
    "        :return: 包含系统和用户提示信息的字典列表\n",
    "        \"\"\"\n",
    "        system_prompt = self.prompt_template.render(role=\"system\", topic=topic)\n",
    "        user_prompt = self.prompt_template.render(\n",
    "            role=\"user\",\n",
    "            survey_draft=tokenize_sentences(section),\n",
    "            topic=topic\n",
    "        )\n",
    "        return [\n",
    "            {\"role\": \"system\", \"content\": system_prompt},\n",
    "            {\"role\": \"user\", \"content\": user_prompt},\n",
    "        ]\n",
    "\n",
    "    async def find_statement_citation(self, topic: str, section: str, max_retries: int = 3) -> List[str]:\n",
    "        \"\"\"\n",
    "        Query the LLM with the research topic and section text and extract\n",
    "        citation statements from its response.\n",
    "\n",
    "        :param topic: research topic\n",
    "        :param section: preprocessed section text\n",
    "        :param max_retries: maximum number of attempts\n",
    "        :return: list of citation statements; empty list when all attempts fail\n",
    "        \"\"\"\n",
    "        prompt_messages = self._prepare_prompt_messages(topic, section)\n",
    "        for attempt in range(max_retries):\n",
    "            try:\n",
    "                response = await self.llm.completion(prompt_messages)\n",
    "                response_data = json_repair.loads(response)\n",
    "                if isinstance(response_data, dict) and \"statements\" in response_data:\n",
    "                    return response_data[\"statements\"]\n",
    "                # A list of fully-formed statement dicts is also accepted as-is.\n",
    "                if (\n",
    "                    isinstance(response_data, list)\n",
    "                    and response_data\n",
    "                    and isinstance(response_data[0], dict)\n",
    "                    and {\"statement_abstract\", \"keywords\", \"evidence_spans\"} <= response_data[0].keys()\n",
    "                ):\n",
    "                    return response_data\n",
    "                # Unexpected payload shape: log it, then fall through to retry.\n",
    "                logger.warning(f\"LLM 返回格式不符合预期，尝试次数：{attempt + 1}\")\n",
    "            except Exception as e:\n",
    "                logger.error(f\"调用 LLM 模型时出错：{e}，尝试次数：{attempt + 1}\")\n",
    "            if attempt < max_retries - 1:\n",
    "                await asyncio.sleep(0.5)\n",
    "        # The original fell off the loop and implicitly returned None when the\n",
    "        # final response was well-formed but unexpected; always return a list.\n",
    "        logger.error(\"达到最大重试次数，操作失败。\")\n",
    "        return []\n",
    "\n",
    "    async def process_section(self, section: str, topic: str) -> Optional[List[str]]:\n",
    "        \"\"\"\n",
    "        Asynchronously process one text section: delegate to\n",
    "        find_statement_citation and shield the caller from any failure.\n",
    "\n",
    "        :param section: raw text section\n",
    "        :param topic: research topic\n",
    "        :return: the model's citation statements, or None if an error occurred\n",
    "        \"\"\"\n",
    "        try:\n",
    "            # Return the coroutine result directly; no intermediate needed.\n",
    "            return await self.find_statement_citation(topic=topic, section=section)\n",
    "        except Exception as e:\n",
    "            logger.error(f\"处理文本段时出错：{e}\")\n",
    "            return None\n",
    "\n",
    "\n",
    "    async def process_all_sections(self, sections: List[str], topic: str) -> List[List[str]]:\n",
    "        \"\"\"\n",
    "        Concurrently process all text sections, preserving input order, and\n",
    "        return the citation statements for each section.\n",
    "\n",
    "        :param sections: list of text sections\n",
    "        :param topic: research topic\n",
    "        :return: citation statement lists per section (failed sections removed)\n",
    "        \"\"\"\n",
    "        tasks = [self.process_section(section, topic) for section in sections]\n",
    "        results = await asyncio.gather(*tasks, return_exceptions=True)\n",
    "        # gather(return_exceptions=True) yields exception objects for failed\n",
    "        # tasks; the original `is not None` filter let them leak into the\n",
    "        # returned list. Filter both failure markers out.\n",
    "        return [\n",
    "            result for result in results\n",
    "            if result is not None and not isinstance(result, BaseException)\n",
    "        ]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the citation finder used by the cells below.\n",
    "find_statement_citation = FindStatementCitation()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\n## 1 Introduction\\n\\nThe technological roadmap of multi-model large models is a critical research area shaping the future of artificial intelligence. This survey aims to assess the current state of the field, identify key challenges, and provide insights into future development paths. The context of the survey is rooted in the rapid advancements in machine learning, particularly in natural language processing (NLP) and computer vision, which have driven the creation of multi-model large models. These models are designed to integrate information from diverse sources such as text, images, and audio for complex tasks. The survey will draw upon a range of literature, including recent papers that have deepened our understanding of these models.\\n## 2 Transformer-Based Reinforcement Learning Methods'"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Grab the merged sections from the pipeline; the bare last expression\n",
    "# displays the first section for a quick sanity check.\n",
    "section = pipeline.merged_sections[0]\n",
    "sections = pipeline.merged_sections\n",
    "section"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    }
   ],
   "source": [
    "# Run citation extraction concurrently over every merged section.\n",
    "statement_list = await find_statement_citation.process_all_sections(sections, topic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:asyncio:已完成prepare_draft_info\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): the commented-out lines are earlier pipeline stages that\n",
    "# were already executed in previous cells; kept here for reference only.\n",
    "# logger.info(\"开始处理引用pipeline\")\n",
    "# pipeline.split_by_primary_headers()\n",
    "# pipeline.merge_sections()\n",
    "# logger.info(\"已合并段落\")\n",
    "# await pipeline.find_statement_citations()\n",
    "# logger.info(\"已完成find_statement\")\n",
    "\n",
    "await pipeline.prepare_draft_info()\n",
    "logger.info(\"已完成prepare_draft_info\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rough wall-clock timings observed for each pipeline stage:\n",
    "# find_statement_citation: 50s\n",
    "# literature retrieval: 15.5s\n",
    "# process_drafts: 1min 2.4s\n",
    "# \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The technological roadmap of multi-model large models is a critical research area shaping the future of artificial intelligence.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The survey will draw upon a range of literature, including recent papers that have deepened our understanding of these models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The working memory module is a crucial element for improving the efficiency and generalization of Transformer-based reinforcement learning methods, addressing limitations of previous approaches that heavily rely on model size and inefficient data learning.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The working memory module enables models to store, integrate, and retrieve training information, leading to enhanced performance across diverse tasks, drawing inspiration from cognitive science and neural network models with memory mechanisms.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The working memory module can be implemented in various ways, with examples including Neural Turing Machines (NTMs) by Graves et al. and memory networks by Sukhbaatar et al.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Goyal et al. propose a shared global workspace method that fosters information sharing among neural modules, akin to working memory.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Our approach utilizes LoRA (Low-Rank Adaptation) to enhance the working memory module, capitalizing on its established effectiveness in simple reinforcement learning settings and natural language processing tasks.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Integrating a working memory module into Transformer-based models could offer valuable insights for revisiting earlier memory-augmentation methods, especially with the emergence of more powerful foundation models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: This could potentially lead to more efficient and generalized reinforcement learning models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Cognitive science theories have significantly influenced the development of Transformer-based reinforcement learning methods, providing a foundation for designing adaptable and effective models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Key contributions from cognitive science include the study of working memory, attention mechanisms, and cognitive load, which have been instrumental in adapting cognitive load theory to optimize reinforcement learning models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Alan Baddeley's model of working memory has inspired the design of working memory modules in Transformer-based reinforcement learning, improving the model's information retention and manipulation.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: By integrating cognitive science theories, researchers have developed more sophisticated reinforcement learning methods capable of handling complex tasks and environments.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The technological roadmap for multi-model large models currently faces limitations in the range of evaluated concepts.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: To address this, targeted data collection efforts are needed to develop more comprehensive large-scale evaluation datasets.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The zero-shot evaluation paradigm, which typically focuses on a single aspect or person in an image, can lead to mismatches between training and evaluation scenarios.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: This mismatch is exacerbated by non-representative image cropping in evaluation datasets, which differs from the typical photographic framing of image-text corpora.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Enhancing model robustness requires research that focuses on more diverse datasets, as suggested by Liu et al.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Image cropping is used as a data augmentation technique in evaluation datasets, which has been shown to enhance generalization performance in computer vision research, but may not accurately represent standard photographic framing in image-text corpora.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The evaluation datasets employ image cropping as a data augmentation technique, which has been effectively used in computer vision research to enhance generalization performance.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Pre-trained model features have inherent problems that can impede out-of-distribution generalization and may lack the necessary information for new tasks.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The limitations of previous approaches in fine-tuning generalization stem from the assumption that pre-trained model features are inherently free from flaws and contain all necessary information for new tasks, which is not always accurate.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Pre-trained features may contain inherent problems, as noted in the works of Bommasani et al. [2021] and Xue et al. [2023], which can hinder out-of-distribution (OOD) generalization.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Additionally, pre-trained features might not include all the information needed for new tasks, rendering the preservation of these features during fine-tuning inadequate for supporting such tasks.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: This highlights the need for more advanced approaches to address these limitations and improve the effectiveness of pre-trained models for novel tasks.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Large Language Models (LLMs) have demonstrated versatility in various applications, including natural language understanding and code generation.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Multi-modal large language models (MLLMs) integrate LLMs with other modalities like images, audio, and video, opening new research avenues.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Sequential recommendation systems rely on user behavior patterns to predict future interactions, and previous research has used various model architectures, including RNNs, CNNs, and attention mechanisms, to understand user preferences.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Additional learning tasks like causal inference, data augmentation, and robust learning have been investigated to improve sequential recommendation performance.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The integration of LLMs into recommendation systems, known as LLM4Rec, is categorized into two approaches: LLM as the recommender and LLM as the enhancer.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: LLaRA is a groundbreaking work that merges traditional sequential recommendation models with LLMs, utilizing both behavioral patterns and reasoning abilities.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The subsection explores the integration of Large Language Models (LLMs) into sequential recommendation systems, highlighting the contributions of LLaRA and LLaMA-VID.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: LLaRA merges traditional sequential recommendation models with LLMs, leveraging behavioral patterns and combining them with LLMs' reasoning and background knowledge to enhance user behavior understanding and recommendation accuracy.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: LLaMA-VID introduces a novel method for processing long video sequences within LLMs, encoding each frame with two tokens to understand and process extensive video content.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The integration of LLMs into sequential recommendation systems faces challenges such as higher computational requirements and interpretability issues, but progress in LLMs is promising for developing sophisticated and effective recommendation engines.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Transformer-based models have revolutionized natural language processing and computer vision, providing a robust architecture for sequence modeling.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Recent advancements in Transformer-based RL methods have focused on overcoming limitations in generalization and adaptability.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Chen et al. proposed Transformer-based architectures for RL, treating it as a sequence modeling problem.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Zheng et al. introduced the Online Decision Transformer (Online DT) to improve online adaptability.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Xu et al. proposed a Hyper-network-based module for efficient offline adaptation.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Xu et al. introduced prompt-based DT for task adaptation.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Lee et al. proposed a multi-game DT (MDT) that achieved human-level performance across various Atari games.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: A working memory module capable of storing, blending, and retrieving training information is introduced to enhance generalization.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: This module draws inspiration from neural network-based models with memory mechanisms, such as Neural Turing Machines (NTMs) and memory networks.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The integration of a working memory module is anticipated to improve model and training efficiency, addressing the limitations of current Transformer-based RL methods.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: GPT and its variants have revolutionized the field of large language models by introducing a novel approach to language modeling.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The original GPT employed a transformer-based architecture for autoregressive text generation, a significant breakthrough.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Variants such as GPT-2, GPT-3, and GPT-4 further enhanced LLM capabilities, demonstrating human-like proficiency in tasks like reasoning, summarization, and translation.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: These models were pre-trained on vast text data, enabling them to learn complex linguistic patterns and structures.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The success of GPT has catalyzed research and development in LLMs, leading to numerous open-source and proprietary models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: However, these models also face challenges, including the potential for generating biased or harmful content and the computational demands for training and inference.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Ongoing research is focused on mitigating these issues to improve the robustness, fairness, and efficiency of LLMs.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Long video sequence processing in LLMs is a significant challenge, with traditional methods struggling to handle the large number of tokens required for each frame, leading to inefficiencies and limitations on video length.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The LLaMA-VID framework addresses the challenge of long video sequence processing by encoding each frame with only two tokens, enabling efficient representation and comprehension of video content within the constraints of existing LLMs.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The LLaMA-VID framework uses an encoder and decoder to generate visual embeddings and text-guided features, contributing to its simplicity and effectiveness in video understanding.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The framework employs a customized token generation strategy and instruction tuning to maximize LLMs' potential for image and video understanding.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Despite advancements, the LLaMA-VID framework still faces challenges, such as handling complex temporal relationships in videos and integrating additional modalities to enhance video content understanding.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Future research in video understanding for LLMs is expected to focus on addressing these challenges to expand LLMs' capabilities.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The LLaMA-VID framework is a significant advancement in integrating large language models (LLMs) with visual information for enhanced understanding and processing.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The framework is constructed using an encoder and a decoder, both based on transformer architectures.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The text decoder, capable of utilizing models like BERT or QFormer, processes the visual embeddings and text query to produce an LLM response.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: LLaMA-VID addresses the challenge of processing long video sequences by encoding each frame with just two tokens, enabling it to handle sequences over an hour long—a limitation previously encountered by LLMs.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: This framework represents a substantial development in multimodal models capable of integrating and processing information from multiple modalities, such as text and images.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Traditional object detection systems have a fixed number of classification heads, limiting their ability to predict beyond their trained classes.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Open-vocabulary detection systems use contrastive learning to align object-level visual features with textual class embeddings from pretrained text encoders like BERT, enabling broader object classification during inference.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Our research involves fine-tuning a multimodal model based on LLMs to predict objects of interest based on user queries and input images, followed by using an open-vocabulary detector for specific location prediction.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: We have compiled a high-quality fine-tuning dataset containing 5000 images and approximately 30000 query-answer pairs, which is open-sourced for the research community.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The limitations of current object detection systems are highlighted by the rapid progress in natural language processing and computer vision, particularly affecting robots' ability to interact with the physical world.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Reasoning-based object detection is introduced as a solution to overcome the limitations of current object detection systems, where humans provide abstract queries via natural language, and the model detects the object fulfilling the query.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: A multimodal model has been fine-tuned on LLMs to predict objects based on user queries and input images, followed by using an open-vocabulary detector for specific location prediction.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: A high-quality fine-tuning dataset with 5000 images and around 30000 query-answer pairs has been created to enhance the model's instruction-following ability, which is open-sourced for the research community.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The fine-tuning of multimodal models based on large language models (LLMs) is introduced as a novel approach to predict objects of interest based on user queries and input images, followed by using predicted object names to guide an open-vocabulary detector for specific location prediction.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: A high-quality fine-tuning dataset containing 5000 images and approximately 30000 query-answer pairs has been created to improve the model's instruction-following ability and is open-sourced for further research and development in reasoning-based object detection.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The integration of AI in biomedicine faces challenges in clinical settings due to the complexity of biological systems, the need for high accuracy in diagnostics, and ethical concerns surrounding patient data.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The transition from research to practical clinical use of AI models is a significant challenge.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The complexity of human health presents substantial challenges for AI models, which must handle multifactorial diseases influenced by genetic, environmental, and lifestyle factors.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: AI systems must be robust enough to provide precise predictions and recommendations, given the high stakes in medical decision-making.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Errors in diagnosis or treatment can have severe consequences for patients, emphasizing the need for stringent accuracy standards.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Ethical considerations, particularly concerning patient data privacy and consent, are crucial in AI systems.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: AI systems require extensive datasets for training, often including sensitive patient information, which must be balanced with ethical privacy concerns.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Thorough validation and testing of AI models in clinical settings is emphasized due to the challenges they face.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The development of frameworks to address ethical issues and ensure patient safety is essential in the deployment of AI in healthcare.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The integration of AI into biomedicine requires careful consideration of data sharing and ethical implications, which is particularly challenging in the medical field, especially in digitizing histopathology slides due to limited 'big data'. Overcoming patient privacy regulations at various levels is vital for data sharing, and establishing standardized public repositories for de-identified data is essential. Sharing code and pretrained model weights is also important for knowledge-sharing and ensuring repeatability. Incorporating uncertainty quantification, explainability, and strategies for handling missing data is crucial for developing robust and ethical multimodal AI models. The World Health Organization's ethics and governance guidelines provide a framework for responsible innovation in this area.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The study by Evans et al. (1998) demonstrates the potential for suboptimal performance when physicians can choose to follow or disregard CDSS recommendations.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Advancements in multimodal machine learning for image-based clinical support have been significant, but attention must be paid to data biases and under-representation.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Responsible innovation that adheres to ethical principles, including data privacy and transparent collaboration, is vital for enhancing healthcare efficiency, accuracy, and patient well-being.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Recent advancements in graph neural networks (GNNs) have enabled early diagnosis by synthesizing brain graphs across different axes, including domain, time, and resolution, from minimal connectomic data.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: GNN models have demonstrated state-of-the-art performance in various medical applications.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: A multimodal brain graph synthesis framework that considers these three axes jointly could enhance diagnosis-based models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Deep understanding of GNN models is crucial, particularly in explaining their results and selecting reproducible models across multiple datasets.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The development of frugal GNNs, trained with minimal brain graph data, is also necessary to advance the field.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The synthesis of multimodal brain graphs is a crucial research area for enhancing the clinical utility of brain graphs in diagnosing various brain disorders, involving integration across different axes and the use of graph neural networks (GNNs) to improve diagnosis.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The development of a multimodal brain graph synthesis framework that considers all three axes simultaneously is essential for enhancing diagnosis-based models and overcoming limitations in medical imaging and connectomic applications.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: A deeper understanding of GNN models is necessary, including explaining how they generate desired results and identifying reproducible models across multiple datasets.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The integration of graph theory with graph representation learning is vital for improving model learning.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The development of frugal GNNs trained with minimal brain graph data is crucial for more efficient and cost-effective diagnosis.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The synthesis of multimodal brain graphs represents a significant advancement in brain graph analysis, offering potential for more accurate and timely diagnosis of brain disorders.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Addressing current challenges in multimodal brain graph synthesis is vital for realizing its potential and advancing AI in biomedicine.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Recent advancements in graph neural networks (GNNs) have shown promise in enhancing the clinical utility of brain graphs for diagnosing brain disorders.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: GNNs can synthesize brain graphs across various axes, such as domain, time, and resolution, from limited connectomic data.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Breakthrough improvements in medical imaging and connectomic applications have been limited by dataset scarcity.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The intersection of graph theory and graph representation learning offers significant potential for improving model learning.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Developing frugal GNNs trained with minimal brain graph data is necessary.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: These advancements and challenges in multimodal brain graph synthesis frameworks, along with the integration of graph theory and graph representation learning, are crucial for a more comprehensive and accurate analysis of brain graphs in diagnosing brain disorders.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Frugal GNNs are a promising approach for brain graph analysis, designed to operate effectively with minimal data, and involve key elements like efficient graph representation learning and robust GNN architecture.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The integration of graph theory and graph representation learning into frugal GNNs development is vital for designing effective algorithms for brain graph analysis.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Research papers such as 'Graph Neural Networks in Network Neuroscience' and 'DetGPT: Detect What You Need Via Reasoning' demonstrate the potential of GNNs in various fields, including neuroscience.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The paper 'Position: Tensor Networks Are a Valuable Asset for Green AI' emphasizes the importance of efficiency in AI research, applicable to the development of frugal GNNs.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The exponential growth in compute demand within the field of AI has significant implications across economic, social, and environmental dimensions.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The operational and embodied emissions associated with compute resources contribute to climate change, conflicting with the goals of the Paris Agreement.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: These challenges highlight the need for a balanced approach to AI research that considers both the pursuit of accuracy and the efficient use of computational resources.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Efficiency metrics are crucial for the sustainable development of AI, particularly in the context of multi-model large models, and various efficiency metrics are suggested for assessing AI models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The Position paper 'Tensor Networks Are a Valuable Asset for Green AI' highlights the role of tensor networks in decreasing computational complexity and energy consumption.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The survey provides a comprehensive overview of advancements, challenges, and future directions in the field of multi-model large models, examining Transformer-based reinforcement learning, the limitations of current roadmaps, and the integration of large language models with other modalities, while also addressing the influence of GPT and the multifaceted nature of multi-model large models, as well as advancements in LLMs and VLMs, AI's potential in biomedicine, and the future of brain graph analysis, emphasizing the importance of sustainability in AI development and deployment, and highlighting the need to address challenges related to architectural choices, evaluated concepts, and the representation of scenes, actions, and cultures for responsible advancement.\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 20, 新处理: 0, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.42985802 0.48634264 0.4645887  0.42017955 0.49628171 0.45702148\n",
      " 0.42082848 0.48881366 0.42334293 0.44917732 0.39181657 0.41448295\n",
      " 0.51488852 0.4576748  0.43713837 0.42069762 0.47388996 0.45902596\n",
      " 0.43082161 0.4011676 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.42985802 0.48634264 0.4645887  0.42017955 0.49628171 0.45702148\n",
      " 0.42082848 0.48881366 0.42334293 0.44917732 0.39181657 0.41448295\n",
      " 0.51488852 0.4576748  0.43713837 0.42069762 0.47388996 0.45902596\n",
      " 0.43082161 0.4011676 ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [14.5078125]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 22\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 2, 新处理: 18, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 3.3PROPOSEDAPPROACH\n",
      "We hypothesize that error se...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 5.3 Generalized Zero-shot Learning\n",
      "Zero-shot lea...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # The non-robust feature from pre-trained model\n",
      "Th...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # BSynthetic Example\n",
      "Continuing from Sec. 3.1, we ...\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [15.953125, 15.4609375, 14.5390625, 14.203125, 13.71875, 13.3046875, 12.96875, 12.609375, 16.6875, 15.6875, 14.921875, 14.7890625, 14.6875, 14.109375, 13.5703125, 13.046875, 14.7265625, 14.4609375, 14.3125, 12.671875, 12.359375, 11.5703125]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 33.18 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # Knowledge Consolidation\n",
      "To further combat catast...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.56345692 0.50409404 0.57329328 0.52575534 0.51588726 0.52351117\n",
      " 0.55236908 0.46444797 0.57118138 0.48597334 0.51046429 0.50942102\n",
      " 0.55097191 0.44919976 0.41595577 0.55527    0.44782305 0.51354747\n",
      " 0.4894172  0.45189486]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.56345692 0.50409404 0.57329328 0.52575534 0.51588726 0.52351117\n",
      " 0.55236908 0.46444797 0.57118138 0.48597334 0.51046429 0.50942102\n",
      " 0.55097191 0.44919976 0.41595577 0.55527    0.44782305 0.51354747\n",
      " 0.4894172  0.45189486]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.6818747  0.61174368 0.63400974 0.5334568  0.56633784 0.62189423\n",
      " 0.58913796 0.57156391 0.59255793 0.51135347 0.5902969  0.55638207\n",
      " 0.59064944 0.59393626 0.56185459 0.57275421 0.57198336 0.54892262\n",
      " 0.56374179 0.59683054]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.6818747  0.61174368 0.63400974 0.5334568  0.56633784 0.62189423\n",
      " 0.58913796 0.57156391 0.59255793 0.51135347 0.5902969  0.55638207\n",
      " 0.59064944 0.59393626 0.56185459 0.57275421 0.57198336 0.54892262\n",
      " 0.56374179 0.59683054]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.69319999 0.60363917 0.62765077 0.55532181 0.58879751 0.56883244\n",
      " 0.56911923 0.57386363 0.56380816 0.52285275 0.57468466 0.55513288\n",
      " 0.5694696  0.51564756 0.60808322 0.58794721 0.52562608 0.55867641\n",
      " 0.54542071 0.5764973 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.69319999 0.60363917 0.62765077 0.55532181 0.58879751 0.56883244\n",
      " 0.56911923 0.57386363 0.56380816 0.52285275 0.57468466 0.55513288\n",
      " 0.5694696  0.51564756 0.60808322 0.58794721 0.52562608 0.55867641\n",
      " 0.54542071 0.5764973 ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 6 Corpus Genre\n",
      "Large LMs are typically pre-train...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.57087402 0.49390858 0.51650013 0.56570695 0.46855407 0.47745147\n",
      " 0.55279632 0.55854256 0.57145222 0.60864924 0.4978206  0.605327\n",
      " 0.5448707  0.48156053 0.5669796  0.59796326 0.45521135 0.48824552\n",
      " 0.57315975 0.53137272]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.57087402 0.49390858 0.51650013 0.56570695 0.46855407 0.47745147\n",
      " 0.55279632 0.55854256 0.57145222 0.60864924 0.4978206  0.605327\n",
      " 0.5448707  0.48156053 0.5669796  0.59796326 0.45521135 0.48824552\n",
      " 0.57315975 0.53137272]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.47942979 0.49918135 0.40340592 0.46907573 0.52383634 0.46507464\n",
      " 0.46375111 0.41681063 0.42318058 0.49113679 0.53052687 0.47758349\n",
      " 0.51146875 0.42492395 0.48131445 0.46784657 0.39406717 0.45003289\n",
      " 0.44178243 0.52334161]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.47942979 0.49918135 0.40340592 0.46907573 0.52383634 0.46507464\n",
      " 0.46375111 0.41681063 0.42318058 0.49113679 0.53052687 0.47758349\n",
      " 0.51146875 0.42492395 0.48131445 0.46784657 0.39406717 0.45003289\n",
      " 0.44178243 0.52334161]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.50771988 0.40685601 0.43972864 0.44237288 0.44018199 0.45690701\n",
      " 0.47032207 0.42782481 0.53791116 0.54830097 0.48940165 0.49375017\n",
      " 0.49025861 0.4285807  0.53847114 0.46609891 0.41097605 0.4059386\n",
      " 0.46995325 0.48255328]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.50771988 0.40685601 0.43972864 0.44237288 0.44018199 0.45690701\n",
      " 0.47032207 0.42782481 0.53791116 0.54830097 0.48940165 0.49375017\n",
      " 0.49025861 0.4285807  0.53847114 0.46609891 0.41097605 0.4059386\n",
      " 0.46995325 0.48255328]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.59552467 0.60482079 0.61385652 0.57255322 0.56997239 0.58709358\n",
      " 0.59272938 0.53481683 0.6133732  0.59367849 0.60194359 0.59174441\n",
      " 0.5902904  0.5383672  0.58973732 0.54260949 0.53203294 0.55551164\n",
      " 0.52775707 0.44665936]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.59552467 0.60482079 0.61385652 0.57255322 0.56997239 0.58709358\n",
      " 0.59272938 0.53481683 0.6133732  0.59367849 0.60194359 0.59174441\n",
      " 0.5902904  0.5383672  0.58973732 0.54260949 0.53203294 0.55551164\n",
      " 0.52775707 0.44665936]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.53867458 0.59841678 0.55580466 0.55822313 0.46331051 0.51616065\n",
      " 0.55520306 0.53250999 0.60629156 0.51271973 0.52297962 0.49311715\n",
      " 0.52003265 0.50146883 0.58094354 0.5060529  0.47439448 0.4982846\n",
      " 0.47416035 0.42977905]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53867458 0.59841678 0.55580466 0.55822313 0.46331051 0.51616065\n",
      " 0.55520306 0.53250999 0.60629156 0.51271973 0.52297962 0.49311715\n",
      " 0.52003265 0.50146883 0.58094354 0.5060529  0.47439448 0.4982846\n",
      " 0.47416035 0.42977905]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.4879574  0.3632995  0.46297543 0.52293706 0.50977293 0.51290488\n",
      " 0.52199671 0.41272483 0.46905595 0.50665791 0.40060855 0.44071772\n",
      " 0.39000647 0.48273837 0.5329722  0.46888519 0.52820679 0.47875214\n",
      " 0.39544363 0.55168312]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.4879574  0.3632995  0.46297543 0.52293706 0.50977293 0.51290488\n",
      " 0.52199671 0.41272483 0.46905595 0.50665791 0.40060855 0.44071772\n",
      " 0.39000647 0.48273837 0.5329722  0.46888519 0.52820679 0.47875214\n",
      " 0.39544363 0.55168312]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.61179519 0.58182529 0.70604145 0.63906911 0.5957375  0.61320098\n",
      " 0.57591431 0.55749811 0.61082803 0.58559795 0.57628588 0.5089172\n",
      " 0.54252079 0.62118706 0.57494997 0.51724828 0.48741612 0.64187473\n",
      " 0.54989071 0.4860762 ]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.61179519 0.58182529 0.70604145 0.63906911 0.5957375  0.61320098\n",
      " 0.57591431 0.55749811 0.61082803 0.58559795 0.57628588 0.5089172\n",
      " 0.54252079 0.62118706 0.57494997 0.51724828 0.48741612 0.64187473\n",
      " 0.54989071 0.4860762 ]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.50566042 0.43098871 0.46410489 0.39919605 0.43343458 0.39952552\n",
      " 0.3888011  0.39776668 0.43770877 0.46370058 0.40712112 0.46305585\n",
      " 0.44702247 0.4340168  0.42030596 0.4501144  0.42324067 0.43477813\n",
      " 0.41554422 0.45315141]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.50566042 0.43098871 0.46410489 0.39919605 0.43343458 0.39952552\n",
      " 0.3888011  0.39776668 0.43770877 0.46370058 0.40712112 0.46305585\n",
      " 0.44702247 0.4340168  0.42030596 0.4501144  0.42324067 0.43477813\n",
      " 0.41554422 0.45315141]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.60003955 0.47929526 0.4979409  0.49867213 0.47743526 0.45535677\n",
      " 0.45235139 0.49232296 0.47776473 0.48681498 0.4949849  0.43046408\n",
      " 0.50718341 0.41044294 0.45325811 0.40941008 0.46949055 0.4522764\n",
      " 0.49374549 0.47961092]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.60003955 0.47929526 0.4979409  0.49867213 0.47743526 0.45535677\n",
      " 0.45235139 0.49232296 0.47776473 0.48681498 0.4949849  0.43046408\n",
      " 0.50718341 0.41044294 0.45325811 0.40941008 0.46949055 0.4522764\n",
      " 0.49374549 0.47961092]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.59767381 0.67035575 0.61518464 0.61011533 0.64157983 0.63992333\n",
      " 0.5814443  0.55204473 0.6298866  0.59150569 0.56334335 0.59491021\n",
      " 0.56990433 0.57585357 0.51768722 0.6135834  0.55865617 0.5938263\n",
      " 0.52340568 0.63603152]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.59767381 0.67035575 0.61518464 0.61011533 0.64157983 0.63992333\n",
      " 0.5814443  0.55204473 0.6298866  0.59150569 0.56334335 0.59491021\n",
      " 0.56990433 0.57585357 0.51768722 0.6135834  0.55865617 0.5938263\n",
      " 0.52340568 0.63603152]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.559301   0.5823509  0.61496248 0.47320295 0.59163252 0.55617181\n",
      " 0.59360395 0.53244587 0.56159686 0.45049496 0.56058572 0.47742698\n",
      " 0.49243726 0.48342459 0.59602454 0.61284988 0.53808795 0.50717517\n",
      " 0.50870503 0.58574241]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.559301   0.5823509  0.61496248 0.47320295 0.59163252 0.55617181\n",
      " 0.59360395 0.53244587 0.56159686 0.45049496 0.56058572 0.47742698\n",
      " 0.49243726 0.48342459 0.59602454 0.61284988 0.53808795 0.50717517\n",
      " 0.50870503 0.58574241]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.51329952 0.48820759 0.52388122 0.60176911 0.51728169 0.53956815\n",
      " 0.52349981 0.52972912 0.49960286 0.55299967 0.56204413 0.60997003\n",
      " 0.48295803 0.50853549 0.49961981 0.52752005 0.48635326 0.52508104\n",
      " 0.48160166 0.53493798]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.51329952 0.48820759 0.52388122 0.60176911 0.51728169 0.53956815\n",
      " 0.52349981 0.52972912 0.49960286 0.55299967 0.56204413 0.60997003\n",
      " 0.48295803 0.50853549 0.49961981 0.52752005 0.48635326 0.52508104\n",
      " 0.48160166 0.53493798]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.67954311 0.69767379 0.68756916 0.68074654 0.63963509 0.67963713\n",
      " 0.64111156 0.68226342 0.62330415 0.66253579 0.65157429 0.62648792\n",
      " 0.7120918  0.59357291 0.58101424 0.66378025 0.62375804 0.69691962\n",
      " 0.65781402 0.67264325]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.67954311 0.69767379 0.68756916 0.68074654 0.63963509 0.67963713\n",
      " 0.64111156 0.68226342 0.62330415 0.66253579 0.65157429 0.62648792\n",
      " 0.7120918  0.59357291 0.58101424 0.66378025 0.62375804 0.69691962\n",
      " 0.65781402 0.67264325]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 5 EXPERIMENTS AND RESULTS\n",
      "\n",
      "# 5.1 EXPERIMENTAL RE...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.56609801 0.62694687 0.55192354 0.47689673 0.58538207 0.54998854\n",
      " 0.57654163 0.60735126 0.59731267 0.60300794 0.61795344 0.50061421\n",
      " 0.52545194 0.598066   0.57564987 0.53144595 0.61580906 0.4887447\n",
      " 0.46768371 0.56208554]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.56609801 0.62694687 0.55192354 0.47689673 0.58538207 0.54998854\n",
      " 0.57654163 0.60735126 0.59731267 0.60300794 0.61795344 0.50061421\n",
      " 0.52545194 0.598066   0.57564987 0.53144595 0.61580906 0.4887447\n",
      " 0.46768371 0.56208554]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.54114325 0.49939376 0.56547845 0.52915342 0.51434521 0.53977789\n",
      " 0.51840833 0.56551723 0.58353264 0.53218069 0.54129604 0.54889961\n",
      " 0.50084862 0.51910465 0.55094711 0.49546139 0.49606546 0.50437189\n",
      " 0.53361038 0.54504837]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.54114325 0.49939376 0.56547845 0.52915342 0.51434521 0.53977789\n",
      " 0.51840833 0.56551723 0.58353264 0.53218069 0.54129604 0.54889961\n",
      " 0.50084862 0.51910465 0.55094711 0.49546139 0.49606546 0.50437189\n",
      " 0.53361038 0.54504837]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.61925857 0.67165326 0.66525179 0.65995186 0.69775099 0.67300138\n",
      " 0.66903794 0.60530002 0.68823566 0.62058399 0.68025315 0.58460667\n",
      " 0.60646202 0.64751943 0.56200979 0.62688726 0.64505155 0.57215998\n",
      " 0.63790659 0.64078861]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.61925857 0.67165326 0.66525179 0.65995186 0.69775099 0.67300138\n",
      " 0.66903794 0.60530002 0.68823566 0.62058399 0.68025315 0.58460667\n",
      " 0.60646202 0.64751943 0.56200979 0.62688726 0.64505155 0.57215998\n",
      " 0.63790659 0.64078861]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.44183228 0.47434881 0.40572174 0.53927935 0.38281202 0.45360247\n",
      " 0.46581096 0.45160253 0.43325783 0.4514011  0.4290967  0.43059291\n",
      " 0.47542651 0.46147245 0.44893624 0.41980012 0.39857561 0.41326625\n",
      " 0.47233947 0.52896175]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.44183228 0.47434881 0.40572174 0.53927935 0.38281202 0.45360247\n",
      " 0.46581096 0.45160253 0.43325783 0.4514011  0.4290967  0.43059291\n",
      " 0.47542651 0.46147245 0.44893624 0.41980012 0.39857561 0.41326625\n",
      " 0.47233947 0.52896175]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.69636208 0.61543993 0.63531446 0.56943472 0.58439397 0.60539665\n",
      " 0.57991965 0.54221046 0.59396945 0.56166482 0.55395164 0.58447371\n",
      " 0.57952624 0.60073575 0.57215733 0.53203656 0.58394671 0.56761356\n",
      " 0.56493876 0.60319706]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.69636208 0.61543993 0.63531446 0.56943472 0.58439397 0.60539665\n",
      " 0.57991965 0.54221046 0.59396945 0.56166482 0.55395164 0.58447371\n",
      " 0.57952624 0.60073575 0.57215733 0.53203656 0.58394671 0.56761356\n",
      " 0.56493876 0.60319706]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.4579251  0.51190957 0.43862967 0.44404741 0.47358969 0.46779932\n",
      " 0.53062882 0.38761499 0.40688944 0.38246466 0.45162871 0.4607703\n",
      " 0.41378449 0.41593098 0.37158788 0.45865892 0.47021534 0.39751726\n",
      " 0.47047104 0.46960493]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.4579251  0.51190957 0.43862967 0.44404741 0.47358969 0.46779932\n",
      " 0.53062882 0.38761499 0.40688944 0.38246466 0.45162871 0.4607703\n",
      " 0.41378449 0.41593098 0.37158788 0.45865892 0.47021534 0.39751726\n",
      " 0.47047104 0.46960493]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.56501642 0.57111544 0.55288243 0.49355195 0.51397178 0.54588542\n",
      " 0.54969348 0.39380706 0.42751507 0.50284737 0.43677199 0.41499929\n",
      " 0.5849137  0.38298512 0.43448879 0.41553364 0.47823674 0.4089341\n",
      " 0.51164632 0.47471011]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.56501642 0.57111544 0.55288243 0.49355195 0.51397178 0.54588542\n",
      " 0.54969348 0.39380706 0.42751507 0.50284737 0.43677199 0.41499929\n",
      " 0.5849137  0.38298512 0.43448879 0.41553364 0.47823674 0.4089341\n",
      " 0.51164632 0.47471011]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.60867513 0.54944601 0.56228374 0.48100957 0.5417784  0.50658577\n",
      " 0.54030451 0.46556538 0.57631756 0.56539997 0.49619435 0.53011885\n",
      " 0.53211657 0.52288173 0.52489166 0.51641741 0.50200469 0.53973104\n",
      " 0.50043871 0.58079777]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.60867513 0.54944601 0.56228374 0.48100957 0.5417784  0.50658577\n",
      " 0.54030451 0.46556538 0.57631756 0.56539997 0.49619435 0.53011885\n",
      " 0.53211657 0.52288173 0.52489166 0.51641741 0.50200469 0.53973104\n",
      " 0.50043871 0.58079777]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.61138013 0.60224797 0.59550541 0.64975122 0.61019463 0.50565167\n",
      " 0.55966429 0.61019041 0.54892393 0.53990385 0.51720134 0.56060669\n",
      " 0.54908918 0.56814162 0.56601664 0.56191535 0.53772162 0.57873395\n",
      " 0.54724422 0.5573252 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.61138013 0.60224797 0.59550541 0.64975122 0.61019463 0.50565167\n",
      " 0.55966429 0.61019041 0.54892393 0.53990385 0.51720134 0.56060669\n",
      " 0.54908918 0.56814162 0.56601664 0.56191535 0.53772162 0.57873395\n",
      " 0.54724422 0.5573252 ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.64670327 0.65189525 0.65026509 0.61212964 0.64373573 0.67812746\n",
      " 0.70690555 0.62576066 0.64786399 0.67082729 0.61964498 0.61421254\n",
      " 0.65569452 0.61336158 0.6032289  0.64906717 0.66189005 0.56588929\n",
      " 0.64383623 0.64551226]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.64670327 0.65189525 0.65026509 0.61212964 0.64373573 0.67812746\n",
      " 0.70690555 0.62576066 0.64786399 0.67082729 0.61964498 0.61421254\n",
      " 0.65569452 0.61336158 0.6032289  0.64906717 0.66189005 0.56588929\n",
      " 0.64383623 0.64551226]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.65027602 0.65002911 0.68909679 0.58397657 0.64642436 0.69883568\n",
      " 0.70231519 0.67128392 0.64519369 0.66065316 0.6960142  0.64651613\n",
      " 0.60447106 0.65729736 0.56095134 0.63105692 0.61931217 0.7036516\n",
      " 0.68111318 0.64053067]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.65027602 0.65002911 0.68909679 0.58397657 0.64642436 0.69883568\n",
      " 0.70231519 0.67128392 0.64519369 0.66065316 0.6960142  0.64651613\n",
      " 0.60447106 0.65729736 0.56095134 0.63105692 0.61931217 0.7036516\n",
      " 0.68111318 0.64053067]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.77486671 0.75725728 0.72155089 0.70954231 0.7057393  0.64358264\n",
      " 0.61221251 0.67339399 0.65852841 0.68413644 0.6648682  0.661286\n",
      " 0.64643231 0.69046306 0.61038923 0.6522606  0.65655025 0.65692377\n",
      " 0.71644016 0.61759636]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.77486671 0.75725728 0.72155089 0.70954231 0.7057393  0.64358264\n",
      " 0.61221251 0.67339399 0.65852841 0.68413644 0.6648682  0.661286\n",
      " 0.64643231 0.69046306 0.61038923 0.6522606  0.65655025 0.65692377\n",
      " 0.71644016 0.61759636]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.50387607 0.44682206 0.50214174 0.46206131 0.47693192 0.47163933\n",
      " 0.51185822 0.44445167 0.5058154  0.47814644 0.40053126 0.43219716\n",
      " 0.49161685 0.52896916 0.43848391 0.48079414 0.49235059 0.47310155\n",
      " 0.38624354 0.49703773]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.50387607 0.44682206 0.50214174 0.46206131 0.47693192 0.47163933\n",
      " 0.51185822 0.44445167 0.5058154  0.47814644 0.40053126 0.43219716\n",
      " 0.49161685 0.52896916 0.43848391 0.48079414 0.49235059 0.47310155\n",
      " 0.38624354 0.49703773]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.4973735  0.56083494 0.51573019 0.5503098  0.52590285 0.56021405\n",
      " 0.49725479 0.4395542  0.45777909 0.52978602 0.54509143 0.45545879\n",
      " 0.47466212 0.43217386 0.57805101 0.39692283 0.34850471 0.50497729\n",
      " 0.52787191 0.51141824]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.4973735  0.56083494 0.51573019 0.5503098  0.52590285 0.56021405\n",
      " 0.49725479 0.4395542  0.45777909 0.52978602 0.54509143 0.45545879\n",
      " 0.47466212 0.43217386 0.57805101 0.39692283 0.34850471 0.50497729\n",
      " 0.52787191 0.51141824]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.55557919 0.52416038 0.55485652 0.55387672 0.55498642 0.50932568\n",
      " 0.58985883 0.54010405 0.54394561 0.52891849 0.54097295 0.54081819\n",
      " 0.55981875 0.47642259 0.45350013 0.50708366 0.59677383 0.50857449\n",
      " 0.52802698 0.53570724]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.55557919 0.52416038 0.55485652 0.55387672 0.55498642 0.50932568\n",
      " 0.58985883 0.54010405 0.54394561 0.52891849 0.54097295 0.54081819\n",
      " 0.55981875 0.47642259 0.45350013 0.50708366 0.59677383 0.50857449\n",
      " 0.52802698 0.53570724]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.64268162 0.50023164 0.56061826 0.55213821 0.53006137 0.5117598\n",
      " 0.54005478 0.48099997 0.53848217 0.55593391 0.53711597 0.54023004\n",
      " 0.6282986  0.56331464 0.52703918 0.52983198 0.49948148 0.47400412\n",
      " 0.54894274 0.58561541]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.64268162 0.50023164 0.56061826 0.55213821 0.53006137 0.5117598\n",
      " 0.54005478 0.48099997 0.53848217 0.55593391 0.53711597 0.54023004\n",
      " 0.6282986  0.56331464 0.52703918 0.52983198 0.49948148 0.47400412\n",
      " 0.54894274 0.58561541]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.53336301 0.54125198 0.55326823 0.51830272 0.52102277 0.57050016\n",
      " 0.50603063 0.5711164  0.5145268  0.53785064 0.50003874 0.49331876\n",
      " 0.4898582  0.51299944 0.5706926  0.4649093  0.53464052 0.56834282\n",
      " 0.46825715 0.47888824]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53336301 0.54125198 0.55326823 0.51830272 0.52102277 0.57050016\n",
      " 0.50603063 0.5711164  0.5145268  0.53785064 0.50003874 0.49331876\n",
      " 0.4898582  0.51299944 0.5706926  0.4649093  0.53464052 0.56834282\n",
      " 0.46825715 0.47888824]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.72032899 0.69399572 0.69504602 0.76218555 0.65397215 0.65495398\n",
      " 0.70124262 0.71450657 0.66530522 0.72068014 0.63204972 0.6955427\n",
      " 0.63857028 0.59674503 0.64084847 0.59902291 0.63865042 0.63119603\n",
      " 0.64655476 0.67729802]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.72032899 0.69399572 0.69504602 0.76218555 0.65397215 0.65495398\n",
      " 0.70124262 0.71450657 0.66530522 0.72068014 0.63204972 0.6955427\n",
      " 0.63857028 0.59674503 0.64084847 0.59902291 0.63865042 0.63119603\n",
      " 0.64655476 0.67729802]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.5252206  0.45884435 0.50736035 0.5279191  0.5283481  0.51620023\n",
      " 0.5011941  0.56397224 0.51602027 0.5067939  0.51649998 0.5213862\n",
      " 0.41225969 0.48719932 0.50498223 0.33529901 0.52393077 0.53140445\n",
      " 0.42984126 0.47533423]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.5252206  0.45884435 0.50736035 0.5279191  0.5283481  0.51620023\n",
      " 0.5011941  0.56397224 0.51602027 0.5067939  0.51649998 0.5213862\n",
      " 0.41225969 0.48719932 0.50498223 0.33529901 0.52393077 0.53140445\n",
      " 0.42984126 0.47533423]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.53057916 0.58727113 0.52206215 0.50607232 0.48696546 0.56038242\n",
      " 0.46704545 0.48092381 0.42045601 0.44164907 0.3985555  0.43225042\n",
      " 0.47975592 0.52249111 0.50112896 0.43929107 0.50923389 0.49478352\n",
      " 0.3906707  0.55631623]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53057916 0.58727113 0.52206215 0.50607232 0.48696546 0.56038242\n",
      " 0.46704545 0.48092381 0.42045601 0.44164907 0.3985555  0.43225042\n",
      " 0.47975592 0.52249111 0.50112896 0.43929107 0.50923389 0.49478352\n",
      " 0.3906707  0.55631623]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.49930845 0.47281646 0.52670329 0.51161402 0.51515501 0.48979668\n",
      " 0.52576915 0.52420542 0.47073063 0.5370848  0.43880051 0.44763035\n",
      " 0.55521223 0.49164686 0.47143013 0.52880828 0.49436512 0.4887121\n",
      " 0.50927016 0.47765956]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.49930845 0.47281646 0.52670329 0.51161402 0.51515501 0.48979668\n",
      " 0.52576915 0.52420542 0.47073063 0.5370848  0.43880051 0.44763035\n",
      " 0.55521223 0.49164686 0.47143013 0.52880828 0.49436512 0.4887121\n",
      " 0.50927016 0.47765956]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.67843125 0.68432135 0.67837953 0.67424082 0.65051169 0.64535916\n",
      " 0.59476078 0.46679161 0.58265428 0.61454179 0.68610879 0.62013026\n",
      " 0.59303534 0.60472872 0.58164199 0.60766391 0.58242504 0.45532367\n",
      " 0.58398297 0.44643734]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.67843125 0.68432135 0.67837953 0.67424082 0.65051169 0.64535916\n",
      " 0.59476078 0.46679161 0.58265428 0.61454179 0.68610879 0.62013026\n",
      " 0.59303534 0.60472872 0.58164199 0.60766391 0.58242504 0.45532367\n",
      " 0.58398297 0.44643734]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.63179549 0.57435209 0.54703305 0.57527553 0.58141734 0.64340919\n",
      " 0.63515521 0.56212345 0.53900937 0.54345422 0.61760273 0.50518014\n",
      " 0.58776912 0.62709018 0.53335652 0.53279821 0.55053578 0.60486499\n",
      " 0.52120862 0.57609396]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.63179549 0.57435209 0.54703305 0.57527553 0.58141734 0.64340919\n",
      " 0.63515521 0.56212345 0.53900937 0.54345422 0.61760273 0.50518014\n",
      " 0.58776912 0.62709018 0.53335652 0.53279821 0.55053578 0.60486499\n",
      " 0.52120862 0.57609396]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.57606355 0.47840222 0.55133918 0.4968787  0.4850935  0.54277348\n",
      " 0.37049934 0.41716427 0.51661881 0.4755819  0.46608121 0.49939082\n",
      " 0.46687921 0.51835814 0.47457276 0.45654895 0.48994371 0.44769992\n",
      " 0.49073934 0.54132662]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.57606355 0.47840222 0.55133918 0.4968787  0.4850935  0.54277348\n",
      " 0.37049934 0.41716427 0.51661881 0.4755819  0.46608121 0.49939082\n",
      " 0.46687921 0.51835814 0.47457276 0.45654895 0.48994371 0.44769992\n",
      " 0.49073934 0.54132662]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.63897213 0.63137919 0.61309652 0.48663507 0.59523697 0.59647871\n",
      " 0.56492611 0.56909068 0.58302996 0.63288092 0.62556475 0.58586722\n",
      " 0.49289648 0.59562082 0.54223417 0.55896786 0.65991851 0.55690754\n",
      " 0.55195151 0.55077008]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.63897213 0.63137919 0.61309652 0.48663507 0.59523697 0.59647871\n",
      " 0.56492611 0.56909068 0.58302996 0.63288092 0.62556475 0.58586722\n",
      " 0.49289648 0.59562082 0.54223417 0.55896786 0.65991851 0.55690754\n",
      " 0.55195151 0.55077008]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.72922261 0.74530216 0.74526149 0.72835432 0.63781699 0.70858345\n",
      " 0.71505916 0.65905309 0.64777811 0.6418037  0.65897698 0.63107027\n",
      " 0.65120443 0.66912884 0.67584755 0.64610278 0.72950551 0.61622859\n",
      " 0.51765784 0.62261564]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.72922261 0.74530216 0.74526149 0.72835432 0.63781699 0.70858345\n",
      " 0.71505916 0.65905309 0.64777811 0.6418037  0.65897698 0.63107027\n",
      " 0.65120443 0.66912884 0.67584755 0.64610278 0.72950551 0.61622859\n",
      " 0.51765784 0.62261564]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.71190016 0.70429952 0.71335364 0.71050443 0.68104223 0.67772138\n",
      " 0.67793991 0.6866348  0.67270865 0.67043012 0.68350855 0.62333355\n",
      " 0.66030821 0.70096075 0.69890278 0.54012067 0.5727195  0.67828556\n",
      " 0.68579127 0.69885266]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.71190016 0.70429952 0.71335364 0.71050443 0.68104223 0.67772138\n",
      " 0.67793991 0.6866348  0.67270865 0.67043012 0.68350855 0.62333355\n",
      " 0.66030821 0.70096075 0.69890278 0.54012067 0.5727195  0.67828556\n",
      " 0.68579127 0.69885266]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.64820921 0.62510469 0.58927153 0.57710656 0.59134322 0.65518896\n",
      " 0.63755373 0.64712752 0.57623611 0.63987395 0.6507621  0.63450089\n",
      " 0.58042325 0.56843728 0.66701866 0.64337969 0.64750793 0.62839301\n",
      " 0.64348358 0.58832807]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.64820921 0.62510469 0.58927153 0.57710656 0.59134322 0.65518896\n",
      " 0.63755373 0.64712752 0.57623611 0.63987395 0.6507621  0.63450089\n",
      " 0.58042325 0.56843728 0.66701866 0.64337969 0.64750793 0.62839301\n",
      " 0.64348358 0.58832807]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.62506406 0.63677947 0.579362   0.55725757 0.57902254 0.54351256\n",
      " 0.60030186 0.69389188 0.59151071 0.71334818 0.63915723 0.53886659\n",
      " 0.57724869 0.63413387 0.51513283 0.62897812 0.58184665 0.46058181\n",
      " 0.57820527 0.59615713]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.62506406 0.63677947 0.579362   0.55725757 0.57902254 0.54351256\n",
      " 0.60030186 0.69389188 0.59151071 0.71334818 0.63915723 0.53886659\n",
      " 0.57724869 0.63413387 0.51513283 0.62897812 0.58184665 0.46058181\n",
      " 0.57820527 0.59615713]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 3.3PROPOSEDAPPROACH\n",
      "We hypothesize that error se...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.69757643 0.69550502 0.57775015 0.65528335 0.62885173 0.64403492\n",
      " 0.60465398 0.58389153 0.58105163 0.60562445 0.57742149 0.5503534\n",
      " 0.65350827 0.61191233 0.60181535 0.57934658 0.60024016 0.66006458\n",
      " 0.46730888 0.56821404]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.69757643 0.69550502 0.57775015 0.65528335 0.62885173 0.64403492\n",
      " 0.60465398 0.58389153 0.58105163 0.60562445 0.57742149 0.5503534\n",
      " 0.65350827 0.61191233 0.60181535 0.57934658 0.60024016 0.66006458\n",
      " 0.46730888 0.56821404]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.66156655 0.65266294 0.65113095 0.64819436 0.65296863 0.53862401\n",
      " 0.64266249 0.66072104 0.59583116 0.61678145 0.67275065 0.68811226\n",
      " 0.6172303  0.62875744 0.64581714 0.59484755 0.63975653 0.61305507\n",
      " 0.64958968 0.60756822]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.66156655 0.65266294 0.65113095 0.64819436 0.65296863 0.53862401\n",
      " 0.64266249 0.66072104 0.59583116 0.61678145 0.67275065 0.68811226\n",
      " 0.6172303  0.62875744 0.64581714 0.59484755 0.63975653 0.61305507\n",
      " 0.64958968 0.60756822]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.49755525 0.51027262 0.49824586 0.54394501 0.5025598  0.46048816\n",
      " 0.47024949 0.48020452 0.54309703 0.47636605 0.49017268 0.47198733\n",
      " 0.46141056 0.43803064 0.52196542 0.49606669 0.52470078 0.44460303\n",
      " 0.43285157 0.41287975]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.49755525 0.51027262 0.49824586 0.54394501 0.5025598  0.46048816\n",
      " 0.47024949 0.48020452 0.54309703 0.47636605 0.49017268 0.47198733\n",
      " 0.46141056 0.43803064 0.52196542 0.49606669 0.52470078 0.44460303\n",
      " 0.43285157 0.41287975]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 3.3.3 LINEAR TRANSFORMER\n",
      "Recent research by Schl...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.49745574 0.48423481 0.4888283  0.47504354 0.53318617 0.49325944\n",
      " 0.4857635  0.45169695 0.53451651 0.52394066 0.59296867 0.52658952\n",
      " 0.49943787 0.54406866 0.53669153 0.51004433 0.53834289 0.48047673\n",
      " 0.52181157 0.59783059]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.49745574 0.48423481 0.4888283  0.47504354 0.53318617 0.49325944\n",
      " 0.4857635  0.45169695 0.53451651 0.52394066 0.59296867 0.52658952\n",
      " 0.49943787 0.54406866 0.53669153 0.51004433 0.53834289 0.48047673\n",
      " 0.52181157 0.59783059]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.55345516 0.51848072 0.51565223 0.4764726  0.48281732 0.4080646\n",
      " 0.51111996 0.53718529 0.52617076 0.5041538  0.52605278 0.51276214\n",
      " 0.47524401 0.52068516 0.5313052  0.5435266  0.46824364 0.4970359\n",
      " 0.51637    0.53146207]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.55345516 0.51848072 0.51565223 0.4764726  0.48281732 0.4080646\n",
      " 0.51111996 0.53718529 0.52617076 0.5041538  0.52605278 0.51276214\n",
      " 0.47524401 0.52068516 0.5313052  0.5435266  0.46824364 0.4970359\n",
      " 0.51637    0.53146207]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.43310057 0.48574382 0.4219733  0.42992409 0.43870427 0.42636801\n",
      " 0.45703094 0.4845243  0.48835857 0.43656593 0.46123219 0.44042357\n",
      " 0.38922041 0.49323092 0.41888469 0.40029105 0.44809874 0.45378714\n",
      " 0.4838329  0.37246431]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.43310057 0.48574382 0.4219733  0.42992409 0.43870427 0.42636801\n",
      " 0.45703094 0.4845243  0.48835857 0.43656593 0.46123219 0.44042357\n",
      " 0.38922041 0.49323092 0.41888469 0.40029105 0.44809874 0.45378714\n",
      " 0.4838329  0.37246431]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.37453442 0.36021149 0.32895082 0.39482679 0.37455378 0.41505303\n",
      " 0.33723114 0.3729592  0.33959168 0.3288647  0.33822618 0.41494017\n",
      " 0.39291553 0.35342899 0.4279734  0.29106251 0.28361532 0.38877757\n",
      " 0.3009304  0.35090821]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.37453442 0.36021149 0.32895082 0.39482679 0.37455378 0.41505303\n",
      " 0.33723114 0.3729592  0.33959168 0.3288647  0.33822618 0.41494017\n",
      " 0.39291553 0.35342899 0.4279734  0.29106251 0.28361532 0.38877757\n",
      " 0.3009304  0.35090821]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.59691524 0.55487865 0.57032891 0.54429822 0.47677672 0.46083331\n",
      " 0.4759247  0.48375668 0.4756304  0.49670642 0.50390153 0.4734725\n",
      " 0.46236875 0.46156112 0.44874196 0.54955263 0.40477515 0.4328387\n",
      " 0.36609918 0.42024523]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.59691524 0.55487865 0.57032891 0.54429822 0.47677672 0.46083331\n",
      " 0.4759247  0.48375668 0.4756304  0.49670642 0.50390153 0.4734725\n",
      " 0.46236875 0.46156112 0.44874196 0.54955263 0.40477515 0.4328387\n",
      " 0.36609918 0.42024523]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.39924268 0.42578782 0.43902182 0.39961227 0.37758922 0.36508865\n",
      " 0.46599518 0.386136   0.37131481 0.3625437  0.39526879 0.36801924\n",
      " 0.31698116 0.42380588 0.36125556 0.42681172 0.33620009 0.37271818\n",
      " 0.39784299 0.41455759]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.39924268 0.42578782 0.43902182 0.39961227 0.37758922 0.36508865\n",
      " 0.46599518 0.386136   0.37131481 0.3625437  0.39526879 0.36801924\n",
      " 0.31698116 0.42380588 0.36125556 0.42681172 0.33620009 0.37271818\n",
      " 0.39784299 0.41455759]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.60738432 0.56250233 0.56403444 0.63616077 0.62232358 0.54117287\n",
      " 0.56252908 0.55662638 0.5250448  0.60766101 0.52843352 0.55371316\n",
      " 0.55236756 0.57548554 0.54050642 0.56686803 0.53518594 0.49657037\n",
      " 0.52186339 0.58329837]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.60738432 0.56250233 0.56403444 0.63616077 0.62232358 0.54117287\n",
      " 0.56252908 0.55662638 0.5250448  0.60766101 0.52843352 0.55371316\n",
      " 0.55236756 0.57548554 0.54050642 0.56686803 0.53518594 0.49657037\n",
      " 0.52186339 0.58329837]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.58369044 0.65852157 0.58656938 0.63040897 0.61782381 0.61885036\n",
      " 0.62025966 0.6221202  0.62935056 0.55996013 0.61616496 0.6716965\n",
      " 0.62458025 0.54873769 0.59105302 0.61950817 0.6187705  0.61050674\n",
      " 0.63697518 0.64742412]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.58369044 0.65852157 0.58656938 0.63040897 0.61782381 0.61885036\n",
      " 0.62025966 0.6221202  0.62935056 0.55996013 0.61616496 0.6716965\n",
      " 0.62458025 0.54873769 0.59105302 0.61950817 0.6187705  0.61050674\n",
      " 0.63697518 0.64742412]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.59179266 0.64106288 0.56258238 0.61785898 0.56373908 0.65267267\n",
      " 0.57118743 0.61596797 0.58658827 0.49662629 0.5847861  0.51791979\n",
      " 0.54913622 0.61831433 0.566115   0.55038674 0.58851987 0.59615587\n",
      " 0.49595943 0.52527413]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.59179266 0.64106288 0.56258238 0.61785898 0.56373908 0.65267267\n",
      " 0.57118743 0.61596797 0.58658827 0.49662629 0.5847861  0.51791979\n",
      " 0.54913622 0.61831433 0.566115   0.55038674 0.58851987 0.59615587\n",
      " 0.49595943 0.52527413]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.59919906 0.65005541 0.61035813 0.59120791 0.5445413  0.62194117\n",
      " 0.58729538 0.55704328 0.56625874 0.57574246 0.54989899 0.46468798\n",
      " 0.52471129 0.58481107 0.50587491 0.58431999 0.58895649 0.56390442\n",
      " 0.57119262 0.61430698]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.59919906 0.65005541 0.61035813 0.59120791 0.5445413  0.62194117\n",
      " 0.58729538 0.55704328 0.56625874 0.57574246 0.54989899 0.46468798\n",
      " 0.52471129 0.58481107 0.50587491 0.58431999 0.58895649 0.56390442\n",
      " 0.57119262 0.61430698]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.68357266 0.65137014 0.66009517 0.60954121 0.57017571 0.58846927\n",
      " 0.55656424 0.55215941 0.56187794 0.60567707 0.51655488 0.53850614\n",
      " 0.48406389 0.51222974 0.59337147 0.60053084 0.53983615 0.52601383\n",
      " 0.5316623  0.47378483]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.68357266 0.65137014 0.66009517 0.60954121 0.57017571 0.58846927\n",
      " 0.55656424 0.55215941 0.56187794 0.60567707 0.51655488 0.53850614\n",
      " 0.48406389 0.51222974 0.59337147 0.60053084 0.53983615 0.52601383\n",
      " 0.5316623  0.47378483]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.49234708 0.42898795 0.49936016 0.43318    0.42391899 0.38111025\n",
      " 0.47323488 0.46007549 0.44762687 0.47621981 0.44619875 0.46125554\n",
      " 0.45410368 0.42328123 0.45705636 0.35671727 0.46052263 0.43773429\n",
      " 0.44765614 0.40614411]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.49234708 0.42898795 0.49936016 0.43318    0.42391899 0.38111025\n",
      " 0.47323488 0.46007549 0.44762687 0.47621981 0.44619875 0.46125554\n",
      " 0.45410368 0.42328123 0.45705636 0.35671727 0.46052263 0.43773429\n",
      " 0.44765614 0.40614411]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.77007356 0.62100611 0.66423655 0.69832247 0.68988692 0.68248332\n",
      " 0.70697604 0.75677257 0.6464915  0.71993576 0.74425697 0.66368035\n",
      " 0.67553082 0.62380026 0.64120396 0.61396696 0.69111693 0.71601845\n",
      " 0.69771641 0.62430726]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.77007356 0.62100611 0.66423655 0.69832247 0.68988692 0.68248332\n",
      " 0.70697604 0.75677257 0.6464915  0.71993576 0.74425697 0.66368035\n",
      " 0.67553082 0.62380026 0.64120396 0.61396696 0.69111693 0.71601845\n",
      " 0.69771641 0.62430726]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.67473122 0.70870786 0.69523514 0.69195818 0.67033096 0.69319517\n",
      " 0.65714968 0.5943572  0.58997829 0.56140741 0.59080752 0.59410207\n",
      " 0.68914983 0.58825331 0.59587967 0.57686085 0.59337599 0.56903381\n",
      " 0.59762046 0.58007053]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.67473122 0.70870786 0.69523514 0.69195818 0.67033096 0.69319517\n",
      " 0.65714968 0.5943572  0.58997829 0.56140741 0.59080752 0.59410207\n",
      " 0.68914983 0.58825331 0.59587967 0.57686085 0.59337599 0.56903381\n",
      " 0.59762046 0.58007053]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.48060008 0.44288867 0.47797135 0.47413762 0.43818854 0.44672508\n",
      " 0.44664055 0.46157356 0.40156432 0.40842045 0.42055306 0.44259215\n",
      " 0.4762328  0.47009992 0.4304043  0.43029608 0.38977981 0.4283711\n",
      " 0.43084428 0.42134659]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.48060008 0.44288867 0.47797135 0.47413762 0.43818854 0.44672508\n",
      " 0.44664055 0.46157356 0.40156432 0.40842045 0.42055306 0.44259215\n",
      " 0.4762328  0.47009992 0.4304043  0.43029608 0.38977981 0.4283711\n",
      " 0.43084428 0.42134659]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.49681968 0.50321363 0.51514099 0.52412405 0.43766952 0.47592089\n",
      " 0.485978   0.44923977 0.48819818 0.44380078 0.47773346 0.44446376\n",
      " 0.46633006 0.42086985 0.51172857 0.46115185 0.50548424 0.4870627\n",
      " 0.46380833 0.51934562]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.49681968 0.50321363 0.51514099 0.52412405 0.43766952 0.47592089\n",
      " 0.485978   0.44923977 0.48819818 0.44380078 0.47773346 0.44446376\n",
      " 0.46633006 0.42086985 0.51172857 0.46115185 0.50548424 0.4870627\n",
      " 0.46380833 0.51934562]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.462418   0.5488107  0.56471918 0.56385224 0.56405537 0.54499658\n",
      " 0.56157188 0.52226193 0.55084576 0.59200746 0.53983306 0.57212145\n",
      " 0.52951706 0.48901028 0.52436812 0.55104307 0.54324143 0.54375186\n",
      " 0.57222195 0.54938707]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.462418   0.5488107  0.56471918 0.56385224 0.56405537 0.54499658\n",
      " 0.56157188 0.52226193 0.55084576 0.59200746 0.53983306 0.57212145\n",
      " 0.52951706 0.48901028 0.52436812 0.55104307 0.54324143 0.54375186\n",
      " 0.57222195 0.54938707]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.55341647 0.57460037 0.56332696 0.55771919 0.48436904 0.45994638\n",
      " 0.49434353 0.54838869 0.52758923 0.51156725 0.49481669 0.45888965\n",
      " 0.52644609 0.44794402 0.41193281 0.53948354 0.48574052 0.47749193\n",
      " 0.48892616 0.4968846 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.55341647 0.57460037 0.56332696 0.55771919 0.48436904 0.45994638\n",
      " 0.49434353 0.54838869 0.52758923 0.51156725 0.49481669 0.45888965\n",
      " 0.52644609 0.44794402 0.41193281 0.53948354 0.48574052 0.47749193\n",
      " 0.48892616 0.4968846 ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.62256025 0.6291315  0.65246199 0.63238695 0.66272517 0.59273619\n",
      " 0.60841667 0.69287948 0.63713655 0.64914603 0.64765905 0.5778498\n",
      " 0.6175444  0.58611129 0.63924615 0.62244164 0.60496782 0.59447989\n",
      " 0.61630733 0.59217211]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.62256025 0.6291315  0.65246199 0.63238695 0.66272517 0.59273619\n",
      " 0.60841667 0.69287948 0.63713655 0.64914603 0.64765905 0.5778498\n",
      " 0.6175444  0.58611129 0.63924615 0.62244164 0.60496782 0.59447989\n",
      " 0.61630733 0.59217211]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.5046367  0.47921337 0.53033337 0.54908503 0.48293229 0.47561334\n",
      " 0.46346274 0.51010577 0.43998253 0.50357942 0.51977821 0.48139912\n",
      " 0.55175597 0.5410231  0.52693736 0.52619824 0.54881895 0.5177773\n",
      " 0.51504549 0.51618311]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.5046367  0.47921337 0.53033337 0.54908503 0.48293229 0.47561334\n",
      " 0.46346274 0.51010577 0.43998253 0.50357942 0.51977821 0.48139912\n",
      " 0.55175597 0.5410231  0.52693736 0.52619824 0.54881895 0.5177773\n",
      " 0.51504549 0.51618311]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.63274701 0.63736721 0.66605391 0.65028304 0.62622925 0.57257794\n",
      " 0.6478841  0.6478841  0.68635559 0.67260618 0.65321009 0.66799149\n",
      " 0.65490202 0.68516997 0.68698309 0.66333209 0.72354968 0.62572925\n",
      " 0.6174836  0.61047791]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.63274701 0.63736721 0.66605391 0.65028304 0.62622925 0.57257794\n",
      " 0.6478841  0.6478841  0.68635559 0.67260618 0.65321009 0.66799149\n",
      " 0.65490202 0.68516997 0.68698309 0.66333209 0.72354968 0.62572925\n",
      " 0.6174836  0.61047791]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.65710654 0.59768834 0.64661043 0.64693562 0.6431623  0.62505001\n",
      " 0.61824512 0.59888122 0.56873945 0.59100032 0.61236648 0.62177252\n",
      " 0.60293806 0.59836454 0.59684256 0.62987816 0.56861937 0.61561227\n",
      " 0.54371278 0.63722011]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.65710654 0.59768834 0.64661043 0.64693562 0.6431623  0.62505001\n",
      " 0.61824512 0.59888122 0.56873945 0.59100032 0.61236648 0.62177252\n",
      " 0.60293806 0.59836454 0.59684256 0.62987816 0.56861937 0.61561227\n",
      " 0.54371278 0.63722011]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.58493324 0.5733779  0.56361015 0.53067454 0.48593965 0.40110285\n",
      " 0.52774515 0.42969182 0.54545473 0.45187889 0.56116264 0.44113613\n",
      " 0.50227977 0.35715338 0.42605823 0.57859339 0.41139967 0.46312458\n",
      " 0.38142228 0.34734695]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.58493324 0.5733779  0.56361015 0.53067454 0.48593965 0.40110285\n",
      " 0.52774515 0.42969182 0.54545473 0.45187889 0.56116264 0.44113613\n",
      " 0.50227977 0.35715338 0.42605823 0.57859339 0.41139967 0.46312458\n",
      " 0.38142228 0.34734695]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.4915207  0.46011063 0.47881101 0.48010473 0.52569142 0.51270872\n",
      " 0.45483455 0.48155522 0.47904857 0.51455882 0.49929822 0.46252715\n",
      " 0.54135543 0.45111676 0.43984734 0.4747692  0.47331254 0.4835691\n",
      " 0.44529575 0.47994138]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.4915207  0.46011063 0.47881101 0.48010473 0.52569142 0.51270872\n",
      " 0.45483455 0.48155522 0.47904857 0.51455882 0.49929822 0.46252715\n",
      " 0.54135543 0.45111676 0.43984734 0.4747692  0.47331254 0.4835691\n",
      " 0.44529575 0.47994138]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.41448111 0.5430177  0.38548295 0.52746552 0.36369186 0.42569612\n",
      " 0.5080194  0.36809104 0.40997533 0.42177448 0.46380625 0.41303805\n",
      " 0.41191441 0.43768626 0.40523935 0.42934178 0.31182655 0.38067043\n",
      " 0.38831233 0.50152783]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.41448111 0.5430177  0.38548295 0.52746552 0.36369186 0.42569612\n",
      " 0.5080194  0.36809104 0.40997533 0.42177448 0.46380625 0.41303805\n",
      " 0.41191441 0.43768626 0.40523935 0.42934178 0.31182655 0.38067043\n",
      " 0.38831233 0.50152783]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.62154922 0.67774016 0.65123079 0.67725564 0.64902812 0.58244936\n",
      " 0.6669399  0.63863183 0.58559178 0.57271937 0.54384733 0.66066269\n",
      " 0.54760779 0.52978601 0.56257403 0.55495658 0.51425214 0.49110776\n",
      " 0.547244   0.55768565]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.62154922 0.67774016 0.65123079 0.67725564 0.64902812 0.58244936\n",
      " 0.6669399  0.63863183 0.58559178 0.57271937 0.54384733 0.66066269\n",
      " 0.54760779 0.52978601 0.56257403 0.55495658 0.51425214 0.49110776\n",
      " 0.547244   0.55768565]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.54956939 0.63297627 0.63914695 0.60979582 0.63679183 0.51980298\n",
      " 0.61897783 0.63617314 0.50711208 0.50048402 0.53332016 0.51508655\n",
      " 0.49636786 0.46044792 0.50376668 0.4563964  0.55073172 0.51825238\n",
      " 0.57181371 0.56356784]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.54956939 0.63297627 0.63914695 0.60979582 0.63679183 0.51980298\n",
      " 0.61897783 0.63617314 0.50711208 0.50048402 0.53332016 0.51508655\n",
      " 0.49636786 0.46044792 0.50376668 0.4563964  0.55073172 0.51825238\n",
      " 0.57181371 0.56356784]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.48298746 0.56442311 0.5668873  0.53783818 0.42773461 0.49480601\n",
      " 0.5612958  0.56154809 0.55434043 0.52384868 0.55673757 0.54163739\n",
      " 0.52489956 0.5442056  0.55726582 0.52749003 0.54768483 0.50625391\n",
      " 0.53149964 0.56005403]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.48298746 0.56442311 0.5668873  0.53783818 0.42773461 0.49480601\n",
      " 0.5612958  0.56154809 0.55434043 0.52384868 0.55673757 0.54163739\n",
      " 0.52489956 0.5442056  0.55726582 0.52749003 0.54768483 0.50625391\n",
      " 0.53149964 0.56005403]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.63370322 0.63731406 0.64032342 0.64032342 0.59946026 0.61300057\n",
      " 0.61257104 0.60561682 0.544234   0.51015633 0.45780929 0.45954564\n",
      " 0.52356659 0.59274525 0.53439124 0.57521895 0.47526611 0.4681912\n",
      " 0.57408741 0.60697683]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.63370322 0.63731406 0.64032342 0.64032342 0.59946026 0.61300057\n",
      " 0.61257104 0.60561682 0.544234   0.51015633 0.45780929 0.45954564\n",
      " 0.52356659 0.59274525 0.53439124 0.57521895 0.47526611 0.4681912\n",
      " 0.57408741 0.60697683]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # B.6 Long-Form Video Question Answering\n",
      "Compared ...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 1. Introduction\n",
      "Scaling up pre-trained language ...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 1. Introduction\n",
      "Recent advancements in Large Lan...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.64687538 0.60007951 0.45377387 0.49310617 0.58296876 0.46126712\n",
      " 0.48501095 0.46720957 0.44046035 0.41728765 0.42407151 0.43328069\n",
      " 0.51740068 0.455691   0.41878337 0.50314258 0.46441885 0.50748803\n",
      " 0.53277165 0.49613214]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.64687538 0.60007951 0.45377387 0.49310617 0.58296876 0.46126712\n",
      " 0.48501095 0.46720957 0.44046035 0.41728765 0.42407151 0.43328069\n",
      " 0.51740068 0.455691   0.41878337 0.50314258 0.46441885 0.50748803\n",
      " 0.53277165 0.49613214]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # B.6 Long-Form Video Question Answering\n",
      "Compared ...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.45864067 0.45287301 0.47237641 0.43442651 0.42956772 0.45956711\n",
      " 0.45321572 0.45636092 0.48734466 0.46649279 0.43799944 0.4671248\n",
      " 0.46895389 0.42116862 0.43851232 0.40845133 0.38625452 0.38925564\n",
      " 0.44885298 0.41026453]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.45864067 0.45287301 0.47237641 0.43442651 0.42956772 0.45956711\n",
      " 0.45321572 0.45636092 0.48734466 0.46649279 0.43799944 0.4671248\n",
      " 0.46895389 0.42116862 0.43851232 0.40845133 0.38625452 0.38925564\n",
      " 0.44885298 0.41026453]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 3 Methodology\n",
      "\n",
      "# 3.1 Preliminary\n",
      "We start with a...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.63395795 0.58760696 0.64574566 0.58776377 0.56669845 0.45858055\n",
      " 0.48997528 0.48674241 0.42094278 0.44280838 0.43164646 0.40973501\n",
      " 0.40051675 0.48862778 0.46856056 0.41552188 0.48099089 0.51934213\n",
      " 0.57565321 0.41990314]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.63395795 0.58760696 0.64574566 0.58776377 0.56669845 0.45858055\n",
      " 0.48997528 0.48674241 0.42094278 0.44280838 0.43164646 0.40973501\n",
      " 0.40051675 0.48862778 0.46856056 0.41552188 0.48099089 0.51934213\n",
      " 0.57565321 0.41990314]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.47083681 0.49357202 0.50945436 0.47213032 0.49118147 0.47641687\n",
      " 0.46915666 0.48353984 0.4295696  0.4768966  0.4092517  0.47858356\n",
      " 0.4524625  0.39613    0.46141215 0.51738188 0.50562687 0.45403935\n",
      " 0.46842031 0.51010252]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.47083681 0.49357202 0.50945436 0.47213032 0.49118147 0.47641687\n",
      " 0.46915666 0.48353984 0.4295696  0.4768966  0.4092517  0.47858356\n",
      " 0.4524625  0.39613    0.46141215 0.51738188 0.50562687 0.45403935\n",
      " 0.46842031 0.51010252]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.40927219 0.3717631  0.48024247 0.47308104 0.4169331  0.48495166\n",
      " 0.4239453  0.34349218 0.35697069 0.44265122 0.43656733 0.42191826\n",
      " 0.36955658 0.32920604 0.37134127 0.46840869 0.38858157 0.31585244\n",
      " 0.40506248 0.33019801]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.40927219 0.3717631  0.48024247 0.47308104 0.4169331  0.48495166\n",
      " 0.4239453  0.34349218 0.35697069 0.44265122 0.43656733 0.42191826\n",
      " 0.36955658 0.32920604 0.37134127 0.46840869 0.38858157 0.31585244\n",
      " 0.40506248 0.33019801]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.68808423 0.6597435  0.50431966 0.53031589 0.6566175  0.51538123\n",
      " 0.43020062 0.4719316  0.47775029 0.54052456 0.49860521 0.49641279\n",
      " 0.5165961  0.51880579 0.44233255 0.51996694 0.47690634 0.48590713\n",
      " 0.47979971 0.48876249]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.68808423 0.6597435  0.50431966 0.53031589 0.6566175  0.51538123\n",
      " 0.43020062 0.4719316  0.47775029 0.54052456 0.49860521 0.49641279\n",
      " 0.5165961  0.51880579 0.44233255 0.51996694 0.47690634 0.48590713\n",
      " 0.47979971 0.48876249]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.78208387 0.70593906 0.66101492 0.62906843 0.73214923 0.53177576\n",
      " 0.55245473 0.48095035 0.48918134 0.57290055 0.5575835  0.474907\n",
      " 0.5167042  0.63833245 0.52487415 0.49173627 0.57092413 0.47122602\n",
      " 0.48266293 0.44348628]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.78208387 0.70593906 0.66101492 0.62906843 0.73214923 0.53177576\n",
      " 0.55245473 0.48095035 0.48918134 0.57290055 0.5575835  0.474907\n",
      " 0.5167042  0.63833245 0.52487415 0.49173627 0.57092413 0.47122602\n",
      " 0.48266293 0.44348628]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.62013874 0.62263297 0.68721037 0.57984092 0.62539438 0.55614115\n",
      " 0.54974783 0.4914832  0.52049038 0.5611336  0.52385201 0.49648676\n",
      " 0.55263166 0.52099851 0.55092996 0.54331753 0.46100314 0.5051098\n",
      " 0.57974015 0.55291996]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.62013874 0.62263297 0.68721037 0.57984092 0.62539438 0.55614115\n",
      " 0.54974783 0.4914832  0.52049038 0.5611336  0.52385201 0.49648676\n",
      " 0.55263166 0.52099851 0.55092996 0.54331753 0.46100314 0.5051098\n",
      " 0.57974015 0.55291996]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.6484541  0.65232317 0.54460183 0.50940304 0.54714026 0.51465889\n",
      " 0.44626013 0.51194886 0.46423672 0.48712825 0.5314228  0.50096726\n",
      " 0.53800937 0.51293143 0.46014521 0.49368889 0.45207958 0.50829529\n",
      " 0.53137678 0.49592906]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.6484541  0.65232317 0.54460183 0.50940304 0.54714026 0.51465889\n",
      " 0.44626013 0.51194886 0.46423672 0.48712825 0.5314228  0.50096726\n",
      " 0.53800937 0.51293143 0.46014521 0.49368889 0.45207958 0.50829529\n",
      " 0.53137678 0.49592906]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.71397416 0.64578955 0.69602579 0.6482702  0.61057231 0.6584681\n",
      " 0.74646026 0.62739638 0.60579197 0.59198232 0.62631586 0.62052421\n",
      " 0.65658459 0.59292757 0.60715556 0.63162559 0.60094594 0.61042371\n",
      " 0.59161248 0.63806232]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.71397416 0.64578955 0.69602579 0.6482702  0.61057231 0.6584681\n",
      " 0.74646026 0.62739638 0.60579197 0.59198232 0.62631586 0.62052421\n",
      " 0.65658459 0.59292757 0.60715556 0.63162559 0.60094594 0.61042371\n",
      " 0.59161248 0.63806232]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.62723682 0.62984996 0.60057012 0.60001409 0.62174153 0.62174153\n",
      " 0.57682961 0.59861772 0.48325259 0.50569683 0.53684658 0.47405456\n",
      " 0.57283185 0.58405293 0.60968679 0.60055694 0.59092243 0.52505666\n",
      " 0.55586573 0.57364125]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.62723682 0.62984996 0.60057012 0.60001409 0.62174153 0.62174153\n",
      " 0.57682961 0.59861772 0.48325259 0.50569683 0.53684658 0.47405456\n",
      " 0.57283185 0.58405293 0.60968679 0.60055694 0.59092243 0.52505666\n",
      " 0.55586573 0.57364125]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 7 CONCLUSION AND LIMITATION\n",
      "The impressive gener...\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.58801508 0.59709076 0.60491105 0.58426686 0.56291982 0.55069293\n",
      " 0.47587279 0.4996213  0.55682963 0.44365725 0.54453702 0.43341537\n",
      " 0.5641104  0.45504698 0.44816323 0.47538079 0.42981719 0.49785662\n",
      " 0.46122642 0.46848433]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.58801508 0.59709076 0.60491105 0.58426686 0.56291982 0.55069293\n",
      " 0.47587279 0.4996213  0.55682963 0.44365725 0.54453702 0.43341537\n",
      " 0.5641104  0.45504698 0.44816323 0.47538079 0.42981719 0.49785662\n",
      " 0.46122642 0.46848433]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 7. Conclusions, limitations, and future work\n",
      "Tra...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # DLIMITATION AND ETHICS CONCERNS\n",
      "\n",
      "# D.1 LIMITATIO...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 2 Impact of vision encoders for vision-language ...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # G. Ethic and Social Impact\n",
      "As brain decoding tec...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # G. Ethic and Social Impact\n",
      "As brain decoding tec...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 6. Ablation Studies\n",
      "We present a few selected ab...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # Acknowledgement\n",
      "We would like to thank Dr. Zuoli...\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 1. Introduction\n",
      "Recent advancements in Large Lan...\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 6. Ablation Studies\\nWe present a few selected ab...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# Acknowledgement\\nWe would like to thank Dr. Zuoli...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 1. Introduction\\nRecent advancements in Large Lan...']\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 3.3PROPOSEDAPPROACH\n",
      "We hypothesize that error se...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 3.3. Self-distillation Training\n",
      "Although we are ...\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 3.3PROPOSEDAPPROACH\\nWe hypothesize that error se...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 3.3. Self-distillation Training\\nAlthough we are ...']\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [13.328125]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 5.3 Generalized Zero-shot Learning\\nZero-shot lea...']\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.6875, 12.078125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 40.75 秒\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.21875, 19.703125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 40.75 秒\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [17.59375, 15.71875, 15.65625, 15.078125, 14.2734375]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.125, 17.140625, 17.109375, 17.09375, 16.859375, 14.890625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 41.28 秒\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [17.203125, 16.9375, 15.546875, 15.4453125, 14.3828125, 13.109375]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.54655923 0.60222976 0.50463516 0.53962589 0.54488494 0.50235975\n",
      " 0.5542812  0.56416584 0.55988077 0.58593003 0.51312498 0.57017874\n",
      " 0.53255304 0.53447632 0.56361511 0.59126113 0.         0.53932268\n",
      " 0.56230869 0.5387117 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.54655923 0.60222976 0.50463516 0.53962589 0.54488494 0.50235975\n",
      " 0.5542812  0.56416584 0.55988077 0.58593003 0.51312498 0.57017874\n",
      " 0.53255304 0.53447632 0.56361511 0.59126113 0.         0.53932268\n",
      " 0.56230869 0.5387117 ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 7. Conclusions, limitations, and future work\\nTra...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 3.3PROPOSEDAPPROACH\\nWe hypothesize that error se...']\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 2\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# G. Ethic and Social Impact\\nAs brain decoding tec...', '# DLIMITATION AND ETHICS CONCERNS\\n\\n# D.1 LIMITATIO...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 2 Impact of vision encoders for vision-language ...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# G. Ethic and Social Impact\\nAs brain decoding tec...']\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.498053   0.47238075 0.4870452  0.48123724 0.52783563 0.46551087\n",
      " 0.48201007 0.56394869 0.50720084 0.52121579 0.59748834 0.48149618\n",
      " 0.53901947 0.52817056 0.52209117 0.52520526 0.         0.5182336\n",
      " 0.4609284  0.44655656]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.498053   0.47238075 0.4870452  0.48123724 0.52783563 0.46551087\n",
      " 0.48201007 0.56394869 0.50720084 0.52121579 0.59748834 0.48149618\n",
      " 0.53901947 0.52817056 0.52209117 0.52520526 0.         0.5182336\n",
      " 0.4609284  0.44655656]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.47848709 0.48834976 0.41865248 0.47528535 0.51388397 0.47043775\n",
      " 0.4501877  0.51188252 0.44517443 0.53076291 0.53716751 0.47409159\n",
      " 0.50482074 0.50036389 0.4747059  0.40579776 0.51789962 0.\n",
      " 0.45920816 0.42598264]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.47848709 0.48834976 0.41865248 0.47528535 0.51388397 0.47043775\n",
      " 0.4501877  0.51188252 0.44517443 0.53076291 0.53716751 0.47409159\n",
      " 0.50482074 0.50036389 0.4747059  0.40579776 0.51789962 0.\n",
      " 0.45920816 0.42598264]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.59798247 0.56338591 0.51625093 0.51333862 0.57541256 0.47811551\n",
      " 0.48681262 0.48623248 0.51336609 0.56403843 0.45837145 0.53790481\n",
      " 0.49840449 0.46667904 0.47770716 0.50505834 0.         0.47469798\n",
      " 0.47412962 0.4263471 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.59798247 0.56338591 0.51625093 0.51333862 0.57541256 0.47811551\n",
      " 0.48681262 0.48623248 0.51336609 0.56403843 0.45837145 0.53790481\n",
      " 0.49840449 0.46667904 0.47770716 0.50505834 0.         0.47469798\n",
      " 0.47412962 0.4263471 ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.49640957 0.53628337 0.54991269 0.51952336 0.52072742 0.52033058\n",
      " 0.55579058 0.54193276 0.49067951 0.49416774 0.5374195  0.50638198\n",
      " 0.51206533 0.5469578  0.48657504 0.43023694 0.46570415 0.56090829\n",
      " 0.51241539 0.        ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.49640957 0.53628337 0.54991269 0.51952336 0.52072742 0.52033058\n",
      " 0.55579058 0.54193276 0.49067951 0.49416774 0.5374195  0.50638198\n",
      " 0.51206533 0.5469578  0.48657504 0.43023694 0.46570415 0.56090829\n",
      " 0.51241539 0.        ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.54645261 0.50123555 0.         0.50735814 0.5274591  0.46139651\n",
      " 0.51689686 0.46080339 0.50750658 0.47508363 0.45158347 0.53551193\n",
      " 0.45812799 0.47928602 0.51760245 0.49416566 0.45967973 0.49595064\n",
      " 0.50812809 0.50765247]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.54645261 0.50123555 0.         0.50735814 0.5274591  0.46139651\n",
      " 0.51689686 0.46080339 0.50750658 0.47508363 0.45158347 0.53551193\n",
      " 0.45812799 0.47928602 0.51760245 0.49416566 0.45967973 0.49595064\n",
      " 0.50812809 0.50765247]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.25, 19.109375, 18.875, 18.234375, 18.171875, 17.5625, 17.125, 16.6875, 16.65625, 16.3125, 15.765625, 15.703125, 15.5, 15.140625, 14.71875, 14.609375, 14.0234375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 42.15 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.875, 17.21875, 16.59375, 16.34375, 16.203125, 16.171875, 15.09375, 14.6875]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.53125, 22.953125, 22.640625, 22.625, 22.453125, 22.09375, 21.5, 20.96875, 20.84375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 42.79 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.359375, 19.25, 18.75, 18.0625, 17.984375, 17.953125, 17.875, 15.828125, 15.359375, 13.8984375, 13.359375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 42.76 秒\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 1. Introduction\\nScaling up pre-trained language ...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# Knowledge Consolidation\\nTo further combat catast...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# B.6 Long-Form Video Question Answering\\nCompared ...']\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.44849311 0.4549236  0.46194667 0.42880775 0.38397446 0.40113905\n",
      " 0.399637   0.41067535 0.43056894 0.39824882 0.40895566 0.\n",
      " 0.36468611 0.4269294  0.37666382 0.37742853 0.34116952 0.33881917\n",
      " 0.43130637 0.33575511]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.44849311 0.4549236  0.46194667 0.42880775 0.38397446 0.40113905\n",
      " 0.399637   0.41067535 0.43056894 0.39824882 0.40895566 0.\n",
      " 0.36468611 0.4269294  0.37666382 0.37742853 0.34116952 0.33881917\n",
      " 0.43130637 0.33575511]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.46140233 0.41839416 0.43940838 0.46786948 0.46215414 0.45029413\n",
      " 0.43932852 0.42431845 0.44831992 0.4509225  0.40389013 0.41311489\n",
      " 0.37360364 0.         0.         0.42112518 0.37175481 0.42732566\n",
      " 0.43417817 0.46316058]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.46140233 0.41839416 0.43940838 0.46786948 0.46215414 0.45029413\n",
      " 0.43932852 0.42431845 0.44831992 0.4509225  0.40389013 0.41311489\n",
      " 0.37360364 0.         0.         0.42112518 0.37175481 0.42732566\n",
      " 0.43417817 0.46316058]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 7 CONCLUSION AND LIMITATION\\nThe impressive gener...']\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.74255526 0.69737392 0.6111799  0.67822699 0.68733565 0.\n",
      " 0.55606334 0.62168164 0.57269506 0.57359598 0.58852145 0.60835424\n",
      " 0.54959009 0.64890097 0.64420236 0.62001145 0.58499806 0.488133\n",
      " 0.57041201 0.55976853]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.74255526 0.69737392 0.6111799  0.67822699 0.68733565 0.\n",
      " 0.55606334 0.62168164 0.57269506 0.57359598 0.58852145 0.60835424\n",
      " 0.54959009 0.64890097 0.64420236 0.62001145 0.58499806 0.488133\n",
      " 0.57041201 0.55976853]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.55372983 0.47188852 0.5105638  0.47934274 0.4784495  0.48136638\n",
      " 0.49041954 0.53346387 0.53312477 0.53267429 0.46276256 0.\n",
      " 0.48159583 0.55105941 0.50195603 0.5537791  0.46273759 0.48719363\n",
      " 0.53882499 0.46318719]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.55372983 0.47188852 0.5105638  0.47934274 0.4784495  0.48136638\n",
      " 0.49041954 0.53346387 0.53312477 0.53267429 0.46276256 0.\n",
      " 0.48159583 0.55105941 0.50195603 0.5537791  0.46273759 0.48719363\n",
      " 0.53882499 0.46318719]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.52264911 0.51038283 0.5807732  0.54753988 0.5756087  0.51077569\n",
      " 0.5002241  0.49816358 0.47653585 0.49627756 0.52165707 0.5344443\n",
      " 0.55245516 0.         0.52640793 0.51400621 0.55409277 0.53191722\n",
      " 0.5840847  0.53161018]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.52264911 0.51038283 0.5807732  0.54753988 0.5756087  0.51077569\n",
      " 0.5002241  0.49816358 0.47653585 0.49627756 0.52165707 0.5344443\n",
      " 0.55245516 0.         0.52640793 0.51400621 0.55409277 0.53191722\n",
      " 0.5840847  0.53161018]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# B.6 Long-Form Video Question Answering\\nCompared ...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 6 Corpus Genre\\nLarge LMs are typically pre-train...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 1. Introduction\\nRecent advancements in Large Lan...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 2\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 3.3PROPOSEDAPPROACH\\nWe hypothesize that error se...', '# Knowledge Consolidation\\nTo further combat catast...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 1, 新处理: 19, 失败: 2\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# BSynthetic Example\\nContinuing from Sec. 3.1, we ...', '# The non-robust feature from pre-trained model\\nTh...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 2\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# The non-robust feature from pre-trained model\\nTh...', '# BSynthetic Example\\nContinuing from Sec. 3.1, we ...']\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 6.4 Interpretability of Diagnostic Results\n",
      "The s...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [22.171875, 17.546875, 15.4140625, 15.03125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 43.90 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.234375, 19.265625, 19.203125, 18.75, 17.984375, 17.765625, 17.65625, 17.625, 15.1015625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 43.80 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.953125, 20.703125, 20.09375, 20.0625, 19.0625, 19.015625, 18.53125, 18.375, 17.421875, 16.890625, 16.796875, 15.3125, 15.1015625, 13.6953125, 13.390625]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 43.91 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.59375, 18.34375, 18.3125, 18.1875, 17.171875, 17.015625, 15.890625, 15.7109375, 13.921875, 13.671875, 13.640625, 13.171875, 13.0859375, 11.09375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 43.97 秒\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.64489303 0.67095571 0.62990967 0.62339437 0.64849731 0.59085881\n",
      " 0.60610222 0.65893351 0.         0.62975014 0.63698942 0.6396482\n",
      " 0.58555655 0.65608391 0.60480025 0.61268502 0.6650787  0.7130711\n",
      " 0.61836583 0.6490278 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.64489303 0.67095571 0.62990967 0.62339437 0.64849731 0.59085881\n",
      " 0.60610222 0.65893351 0.         0.62975014 0.63698942 0.6396482\n",
      " 0.58555655 0.65608391 0.60480025 0.61268502 0.6650787  0.7130711\n",
      " 0.61836583 0.6490278 ]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.64889501 0.67272526 0.55982621 0.60040079 0.53033115 0.58367115\n",
      " 0.         0.57107177 0.53608993 0.56784735 0.557735   0.57096873\n",
      " 0.53255979 0.54146293 0.49703319 0.56062638 0.5728786  0.49737044\n",
      " 0.51014222 0.47833422]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.64889501 0.67272526 0.55982621 0.60040079 0.53033115 0.58367115\n",
      " 0.         0.57107177 0.53608993 0.56784735 0.557735   0.57096873\n",
      " 0.53255979 0.54146293 0.49703319 0.56062638 0.5728786  0.49737044\n",
      " 0.51014222 0.47833422]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.47026278 0.41034783 0.         0.44698285 0.48971959 0.5798129\n",
      " 0.54067044 0.50194771 0.50849758 0.47070687 0.40387934 0.50591954\n",
      " 0.54889789 0.60786024 0.47764489 0.42027263 0.44137754 0.50826234\n",
      " 0.52249361 0.51756879]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.47026278 0.41034783 0.         0.44698285 0.48971959 0.5798129\n",
      " 0.54067044 0.50194771 0.50849758 0.47070687 0.40387934 0.50591954\n",
      " 0.54889789 0.60786024 0.47764489 0.42027263 0.44137754 0.50826234\n",
      " 0.52249361 0.51756879]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 2\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 3 Methodology\\n\\n# 3.1 Preliminary\\nWe start with a...', '# 2 Impact of vision encoders for vision-language ...']\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 2\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 3.3.3 LINEAR TRANSFORMER\\nRecent research by Schl...', '# 3.3PROPOSEDAPPROACH\\nWe hypothesize that error se...']\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [22.203125, 21.5625, 21.515625, 21.46875, 21.140625, 21.046875, 21.015625, 20.296875, 20.1875, 19.984375, 19.875, 19.859375, 19.796875, 19.75, 19.484375, 19.3125, 19.234375, 19.109375, 19.0625, 18.640625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 44.00 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [22.25, 21.015625, 20.8125, 20.578125, 19.359375, 19.28125, 19.15625, 18.890625, 18.828125, 18.5625, 18.265625, 17.046875, 16.0]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 44.04 秒\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.64161352 0.65161485 0.56426259 0.58759737 0.64637903 0.62267101\n",
      " 0.60240452 0.63981706 0.         0.65270157 0.63493328 0.63616323\n",
      " 0.56253872 0.65116469 0.59491256 0.63903647 0.64219076 0.64097846\n",
      " 0.67364867 0.65124066]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.64161352 0.65161485 0.56426259 0.58759737 0.64637903 0.62267101\n",
      " 0.60240452 0.63981706 0.         0.65270157 0.63493328 0.63616323\n",
      " 0.56253872 0.65116469 0.59491256 0.63903647 0.64219076 0.64097846\n",
      " 0.67364867 0.65124066]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.76173358 0.77862927 0.66490818 0.71546344 0.64395497 0.69788572\n",
      " 0.69743879 0.         0.63739847 0.64097844 0.64560768 0.66806821\n",
      " 0.65338557 0.61875164 0.6740005  0.67294419 0.58966138 0.640101\n",
      " 0.63274397 0.62034085]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.76173358 0.77862927 0.66490818 0.71546344 0.64395497 0.69788572\n",
      " 0.69743879 0.         0.63739847 0.64097844 0.64560768 0.66806821\n",
      " 0.65338557 0.61875164 0.6740005  0.67294419 0.58966138 0.640101\n",
      " 0.63274397 0.62034085]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.53062816 0.5376848  0.55039996 0.         0.53147278 0.54359745\n",
      " 0.59382438 0.49353677 0.51846909 0.56492542 0.51076609 0.60254265\n",
      " 0.54335965 0.53262152 0.55956775 0.53790527 0.51043712 0.54250559\n",
      " 0.50962287 0.55092269]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53062816 0.5376848  0.55039996 0.         0.53147278 0.54359745\n",
      " 0.59382438 0.49353677 0.51846909 0.56492542 0.51076609 0.60254265\n",
      " 0.54335965 0.53262152 0.55956775 0.53790527 0.51043712 0.54250559\n",
      " 0.50962287 0.55092269]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.51975811 0.44506006 0.46130007 0.55760652 0.47453906 0.51334435\n",
      " 0.         0.46324787 0.48294203 0.49196188 0.49494204 0.46464598\n",
      " 0.48885793 0.4582653  0.45003986 0.50820747 0.51585    0.45603766\n",
      " 0.38218435 0.50951722]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.51975811 0.44506006 0.46130007 0.55760652 0.47453906 0.51334435\n",
      " 0.         0.46324787 0.48294203 0.49196188 0.49494204 0.46464598\n",
      " 0.48885793 0.4582653  0.45003986 0.50820747 0.51585    0.45603766\n",
      " 0.38218435 0.50951722]\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.53069551 0.5932864  0.4614238  0.         0.57067407 0.5287897\n",
      " 0.51941295 0.55283923 0.         0.62513276 0.57204207 0.51853085\n",
      " 0.47989564 0.5102562  0.57850652 0.47666241 0.55956535 0.57311107\n",
      " 0.52657086 0.48554726]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53069551 0.5932864  0.4614238  0.         0.57067407 0.5287897\n",
      " 0.51941295 0.55283923 0.         0.62513276 0.57204207 0.51853085\n",
      " 0.47989564 0.5102562  0.57850652 0.47666241 0.55956535 0.57311107\n",
      " 0.52657086 0.48554726]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.77339061 0.         0.686341   0.65257106 0.65287334 0.68004586\n",
      " 0.6905552  0.65985927 0.6171187  0.57866669 0.         0.59565308\n",
      " 0.61613475 0.60503979 0.64226299 0.57729678 0.65191142 0.64442033\n",
      " 0.64650041 0.61617649]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.77339061 0.         0.686341   0.65257106 0.65287334 0.68004586\n",
      " 0.6905552  0.65985927 0.6171187  0.57866669 0.         0.59565308\n",
      " 0.61613475 0.60503979 0.64226299 0.57729678 0.65191142 0.64442033\n",
      " 0.64650041 0.61617649]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [16.234375, 15.828125, 15.7109375, 14.3359375, 14.1015625, 13.1875]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.67389271 0.         0.48580034 0.58123125 0.56299886 0.62218778\n",
      " 0.         0.53722707 0.50555746 0.47676404 0.53246066 0.50850996\n",
      " 0.45264091 0.48954645 0.56624837 0.4675413  0.52598    0.47491965\n",
      " 0.51981706 0.58453833]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.67389271 0.         0.48580034 0.58123125 0.56299886 0.62218778\n",
      " 0.         0.53722707 0.50555746 0.47676404 0.53246066 0.50850996\n",
      " 0.45264091 0.48954645 0.56624837 0.4675413  0.52598    0.47491965\n",
      " 0.51981706 0.58453833]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.21875, 20.109375, 20.015625, 19.90625, 19.8125, 19.796875, 19.453125, 19.28125, 19.234375, 19.109375, 18.90625, 18.828125, 18.4375, 16.5]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 44.69 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.546875, 20.328125, 17.96875, 17.125, 17.046875, 16.984375, 16.375, 16.21875, 16.03125, 16.0, 15.8984375, 15.6953125, 15.2421875, 15.015625, 14.5703125, 13.8984375, 13.3125, 13.015625, 12.7421875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 44.66 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [17.4375, 16.5]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.21875, 19.453125, 19.171875, 19.078125, 18.765625, 18.59375, 18.453125, 18.109375, 18.078125, 17.765625, 17.25, 16.5625, 16.1875, 15.484375, 15.4375, 14.96875, 13.8046875, 13.4453125, 12.859375, 12.796875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.34 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.265625, 20.09375, 18.875, 18.421875, 18.078125, 17.921875, 17.421875, 16.75, 16.609375, 16.5, 15.390625, 15.203125, 14.6328125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.32 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [24.015625, 21.453125, 21.4375, 21.4375, 21.4375, 21.328125, 21.25, 21.03125, 20.8125, 20.671875, 20.03125, 19.890625, 19.78125, 19.71875, 19.640625, 18.953125, 18.734375, 18.34375, 17.0]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.29 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.859375, 19.71875, 19.578125, 19.515625, 17.796875, 14.875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.11 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.890625, 19.859375, 19.515625, 17.890625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.11 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.96875, 20.65625, 20.15625, 19.5, 19.15625, 18.921875, 18.421875, 18.1875, 18.15625, 18.140625, 18.109375, 17.6875, 17.046875, 17.0, 16.875, 15.953125, 15.765625, 15.546875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.22 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.796875, 20.171875, 20.140625, 20.140625, 20.125, 20.09375, 19.5625, 19.46875, 19.4375, 19.40625, 18.859375, 18.5625, 18.453125, 18.296875, 18.09375, 17.875, 17.640625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.23 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.890625, 20.84375, 20.71875, 20.6875, 20.578125, 20.546875, 20.09375, 20.0625, 19.8125, 19.6875, 19.671875, 19.5, 19.328125, 19.296875, 19.234375, 19.125, 19.125, 19.078125, 18.90625, 18.734375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.32 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.3125, 17.640625, 17.5, 17.4375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.34 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.71875, 22.046875, 22.015625, 21.828125, 21.1875, 21.0625, 20.8125, 20.78125, 20.75, 20.3125, 19.984375, 19.765625, 19.75, 19.671875, 19.625, 19.421875, 19.078125, 18.546875, 18.5, 18.0625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.19 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.59375, 19.453125, 17.234375, 15.828125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.10 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.3125, 19.203125, 18.453125, 17.8125, 17.5625, 17.53125, 17.53125, 17.296875, 17.234375, 17.140625, 16.921875, 16.796875, 16.6875, 16.390625, 16.234375, 15.640625, 14.9296875, 14.875, 14.8359375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.22 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.703125, 19.984375, 19.515625, 18.96875, 18.8125, 18.640625, 17.578125, 17.3125, 16.75, 16.421875, 16.328125, 16.28125, 16.25, 16.1875, 15.9453125, 15.5703125, 15.4140625, 15.40625, 14.765625, 13.9921875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.33 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.09375, 17.453125, 17.4375, 17.296875, 17.125, 16.75, 16.71875, 16.5, 15.9765625, 15.609375, 15.4921875, 15.2890625, 15.0390625, 14.75, 14.5625, 13.96875, 13.5859375, 13.1171875]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [25.328125, 24.0, 22.1875, 21.9375, 19.265625, 19.234375, 19.03125, 18.640625, 18.515625, 18.0, 17.875, 17.359375, 17.28125, 16.859375, 16.546875, 16.53125, 16.375, 15.7109375, 15.4375, 14.921875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.96 秒\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.62547779 0.61890131 0.49226421 0.56641624 0.48792357 0.48864805\n",
      " 0.47380544 0.         0.58727522 0.48576275 0.56891242 0.54277375\n",
      " 0.45704776 0.         0.44557131 0.50587151 0.54032703 0.48884621\n",
      " 0.48800715 0.43632302]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.62547779 0.61890131 0.49226421 0.56641624 0.48792357 0.48864805\n",
      " 0.47380544 0.         0.58727522 0.48576275 0.56891242 0.54277375\n",
      " 0.45704776 0.         0.44557131 0.50587151 0.54032703 0.48884621\n",
      " 0.48800715 0.43632302]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.53664382 0.61393549 0.56759277 0.57716525 0.61004228 0.5664192\n",
      " 0.57755867 0.         0.56483007 0.55649589 0.59316042 0.\n",
      " 0.58891539 0.52807141 0.52074697 0.51653058 0.50414439 0.53573099\n",
      " 0.53387871 0.57181786]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53664382 0.61393549 0.56759277 0.57716525 0.61004228 0.5664192\n",
      " 0.57755867 0.         0.56483007 0.55649589 0.59316042 0.\n",
      " 0.58891539 0.52807141 0.52074697 0.51653058 0.50414439 0.53573099\n",
      " 0.53387871 0.57181786]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.671875, 19.15625, 18.96875, 18.75, 18.4375, 18.34375, 17.875, 17.75, 17.515625, 17.515625, 17.5, 17.265625, 17.25, 15.3671875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.95 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [22.359375, 21.15625, 20.6875, 18.796875, 18.78125, 18.3125, 18.078125, 17.921875, 17.8125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 45.99 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.09375, 17.203125, 17.0625, 15.765625]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.328125, 17.734375, 17.734375, 17.296875, 17.140625, 16.75, 16.640625, 16.609375, 16.09375, 16.078125, 15.828125, 15.578125, 15.1953125, 14.9453125, 14.515625, 13.859375, 12.625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.52 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.9375, 19.96875, 19.9375, 19.671875, 19.578125, 19.234375, 18.171875, 17.625, 17.59375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.24 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [22.703125, 20.078125, 19.78125, 19.5625, 19.203125, 18.71875, 18.59375, 18.28125, 13.6171875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.28 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.265625, 21.6875, 21.25, 21.21875, 20.9375, 19.984375, 19.6875, 19.359375, 19.34375, 19.296875, 19.015625, 18.59375, 18.484375, 18.46875, 18.390625, 18.25, 18.234375, 18.140625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.38 秒\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 2\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 5 EXPERIMENTS AND RESULTS\\n\\n# 5.1 EXPERIMENTAL RE...', '# 6.4 Interpretability of Diagnostic Results\\nThe s...']\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.625, 21.3125, 20.34375, 17.84375, 15.9453125, 15.609375, 15.1328125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.28 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [22.796875, 21.640625, 20.859375, 16.890625, 16.796875, 16.796875, 15.75, 15.6953125, 15.5078125, 14.8984375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.33 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.671875, 17.71875, 16.578125, 15.265625, 15.1171875, 14.9453125, 14.8046875, 14.78125, 14.6953125, 14.59375, 14.3125, 14.265625, 14.2421875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.33 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.640625, 21.6875, 21.375, 21.359375, 20.703125, 20.625, 20.609375, 20.53125, 19.96875, 18.921875, 18.5, 18.28125, 18.171875, 18.09375, 17.90625, 17.78125, 17.03125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.37 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.90625, 19.421875, 19.171875, 18.84375, 18.53125, 18.296875, 18.25, 17.96875, 17.109375, 16.609375, 16.375, 16.328125, 15.953125, 15.796875, 14.078125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.50 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.8125, 19.390625, 19.296875, 18.90625, 18.390625, 16.875, 15.90625, 15.90625, 15.828125, 14.71875, 13.484375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.50 秒\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.41802725 0.38253897 0.39968688 0.39740475 0.         0.33802572\n",
      " 0.39711678 0.38044325 0.40486109 0.         0.41288446 0.3756238\n",
      " 0.4216295  0.32490324 0.33593363 0.43330865 0.35051012 0.39697726\n",
      " 0.42659227 0.39177245]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.41802725 0.38253897 0.39968688 0.39740475 0.         0.33802572\n",
      " 0.39711678 0.38044325 0.40486109 0.         0.41288446 0.3756238\n",
      " 0.4216295  0.32490324 0.33593363 0.43330865 0.35051012 0.39697726\n",
      " 0.42659227 0.39177245]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.71875, 19.703125, 19.34375, 19.328125, 19.25, 19.140625, 18.703125, 18.671875, 18.5625, 18.296875, 18.109375, 18.046875, 18.0, 17.9375, 17.859375, 17.65625, 17.109375, 16.421875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.86 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.921875, 20.921875, 20.796875, 20.390625, 20.125, 19.5, 19.421875, 19.359375, 19.34375, 19.34375, 19.171875, 17.859375, 17.609375, 17.609375, 14.5390625, 13.15625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.92 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.046875, 20.90625, 20.90625, 20.765625, 20.21875, 19.671875, 19.65625, 19.390625, 19.265625, 19.15625, 19.015625, 18.46875, 17.828125, 17.765625, 17.75, 16.875, 16.21875, 14.9453125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.90 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.9375, 17.828125, 17.78125, 17.5, 17.5, 17.015625, 16.953125, 16.640625, 16.640625, 16.21875, 15.828125, 15.4609375, 15.4296875, 15.3125, 15.1328125, 15.1015625, 14.8515625, 14.46875, 14.421875, 14.1015625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.94 秒\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.96875, 20.296875, 20.15625, 18.953125, 18.921875, 18.859375, 12.890625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.88 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.234375, 20.015625, 19.984375, 19.015625, 18.53125, 18.53125, 18.484375, 18.453125, 18.1875, 17.578125, 17.5625, 17.46875, 17.390625, 17.015625, 17.0, 16.515625, 16.359375, 16.28125, 16.1875, 14.6171875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 47.14 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.890625, 19.453125, 19.203125, 19.09375, 18.140625, 17.78125, 17.5625, 16.484375, 14.7265625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 46.96 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [17.921875, 17.84375, 17.734375, 17.46875, 17.40625, 17.046875, 16.953125, 16.890625, 16.59375, 16.125, 15.90625, 14.84375, 13.953125, 13.171875]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "ERROR:research_agent.core.embedding_model:Rerank请求失败: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [24.125, 23.203125, 21.3125, 21.140625, 19.984375, 19.984375, 19.96875, 19.046875, 18.96875, 18.6875, 18.671875, 18.375, 17.765625, 17.71875, 16.828125, 16.78125, 16.71875, 15.71875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 47.50 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.3125, 16.953125, 16.0625, 15.5625, 15.09375, 15.078125, 15.0234375, 14.8984375, 14.828125, 14.65625, 14.546875, 14.5234375, 13.8671875, 13.484375]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "ERROR:research_agent.core.reference_processor:重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.828125, 20.1875, 19.8125, 19.703125, 19.53125, 19.09375, 19.03125, 18.578125, 18.234375, 18.21875, 18.21875, 18.21875, 18.046875, 17.828125, 17.65625, 17.578125, 17.359375, 17.1875, 16.765625, 16.078125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 47.90 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.390625, 20.078125, 19.96875, 19.578125, 19.546875, 19.046875, 19.046875, 18.96875, 18.859375, 18.796875, 18.71875, 18.65625, 18.296875, 17.90625, 17.875, 17.75, 17.546875, 17.46875, 17.140625, 15.7734375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 47.90 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [24.5625, 24.5625, 23.078125, 22.625, 22.609375, 22.0625, 20.3125, 20.0625, 19.875, 19.765625, 19.75, 19.390625, 19.21875, 18.953125, 18.75, 18.171875, 16.203125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 48.16 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.65625, 18.640625, 18.3125, 17.625, 17.390625, 17.25, 17.1875, 17.109375, 16.84375, 16.828125, 16.625, 16.5625, 16.3125, 16.25, 16.234375, 16.0, 15.796875, 15.4921875, 13.875, 13.4140625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 48.16 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.9375, 20.328125, 20.15625, 20.125, 19.703125, 19.59375, 19.59375, 19.4375, 19.421875, 19.359375, 18.765625, 18.15625, 18.125, 18.015625, 17.484375, 17.453125, 17.421875, 16.90625, 16.640625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 48.70 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.96875, 21.09375, 20.453125, 20.25, 20.203125, 20.0625, 20.0, 19.9375, 18.765625, 18.546875, 18.21875, 17.953125, 17.6875, 17.296875, 17.234375, 17.15625, 16.734375, 16.421875, 15.765625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 48.75 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.03125, 18.578125, 18.40625, 18.265625, 18.140625, 18.09375, 18.078125, 18.078125, 18.015625, 17.984375, 17.9375, 17.921875, 17.71875, 17.546875, 17.40625, 17.3125, 16.234375, 15.8671875, 15.828125, 14.53125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 48.68 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.125, 21.046875, 19.984375, 19.75, 19.484375, 19.28125, 19.234375, 19.109375, 17.421875, 17.046875, 16.8125, 16.78125, 16.359375, 16.296875, 16.171875, 15.90625, 15.71875, 15.6015625, 15.3046875, 14.3515625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 48.74 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [23.5, 23.390625, 22.90625, 22.734375, 22.53125, 22.359375, 22.15625, 22.125, 22.078125, 21.671875, 21.28125, 21.171875, 20.484375, 20.328125, 20.3125, 20.28125, 20.203125, 20.15625, 19.3125, 18.25]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 48.85 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.375, 17.890625, 17.5625, 17.1875, 16.5, 16.28125, 15.7265625, 14.390625, 14.1796875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 49.81 秒\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.859375, 18.984375, 18.4375, 18.359375, 17.78125, 16.34375, 16.015625, 15.984375, 15.78125, 15.765625, 15.734375, 15.640625, 15.640625, 14.1015625, 14.0234375, 13.1484375, 12.7421875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 50.15 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.078125, 19.0625, 19.0, 18.9375, 18.671875, 18.34375, 18.28125, 17.96875, 17.765625, 17.609375, 17.59375, 17.578125, 17.109375, 16.984375, 16.90625, 16.75, 16.265625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 49.97 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [24.1875, 22.6875, 22.125, 20.109375, 18.234375, 17.953125, 17.171875, 17.140625, 16.390625, 16.203125, 16.1875, 16.109375, 16.078125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 49.89 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.59375, 19.5625, 19.03125, 18.28125, 18.234375, 18.0, 17.828125, 17.5625, 17.484375, 17.46875, 16.96875, 16.953125, 16.953125, 16.875, 16.84375, 16.765625, 16.625, 16.59375, 16.46875, 16.203125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 50.17 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [22.328125, 20.34375, 20.328125, 19.28125, 18.5625, 18.453125, 18.265625, 17.78125, 17.3125, 17.140625, 16.90625, 16.859375, 16.328125, 16.3125, 15.8125, 14.8046875, 14.3125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 49.94 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 12, 缓存命中: 4, 新处理: 8, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.35764593 0.41487906 0.37601837 0.3210783  0.38251047 0.34132129\n",
      " 0.37683783 0.32454643 0.36037259 0.3933302  0.36078283 0.32307773]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.35764593 0.41487906 0.37601837 0.3210783  0.38251047 0.34132129\n",
      " 0.37683783 0.32454643 0.36037259 0.3933302  0.36078283 0.32307773]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 50.51 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.328125, 21.296875, 21.21875, 21.171875, 21.03125, 19.75, 19.234375, 18.234375, 17.125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 50.69 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.046875, 18.078125, 17.703125, 16.75, 16.109375, 15.2734375, 14.6484375, 14.6015625, 14.2734375, 13.96875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 50.70 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.265625, 19.8125, 19.015625, 18.46875, 18.421875, 17.90625, 16.59375, 15.921875, 15.78125, 14.9921875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 50.62 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.671875, 20.265625, 20.03125, 20.0, 19.796875, 19.671875, 19.4375, 19.40625, 19.359375, 19.125, 18.796875, 18.4375, 18.3125, 18.1875, 17.796875, 17.734375, 17.578125, 17.234375, 16.5]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 50.70 秒\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.859375, 19.265625, 18.34375, 18.3125, 17.625, 17.59375, 17.453125, 17.21875, 16.515625, 16.046875, 15.96875, 15.8046875, 15.6484375, 15.2734375, 14.921875, 14.7578125, 14.4609375, 13.5390625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 51.12 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [24.96875, 22.84375, 22.421875, 20.6875, 20.578125, 20.4375, 20.25, 20.109375, 20.078125, 19.5, 18.84375, 18.828125, 18.703125, 18.453125, 17.921875, 17.53125, 17.4375, 16.65625, 16.390625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 51.11 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 20, 缓存命中: 6, 新处理: 14, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.32872538 0.35029375 0.24271125 0.3040154  0.31373374 0.39690552\n",
      " 0.3348847  0.4005373  0.3004647  0.34284984 0.36441256 0.37891813\n",
      " 0.380889   0.30637714 0.39829649 0.37251859 0.38454412 0.33951278\n",
      " 0.35169638 0.34596658]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.32872538 0.35029375 0.24271125 0.3040154  0.31373374 0.39690552\n",
      " 0.3348847  0.4005373  0.3004647  0.34284984 0.36441256 0.37891813\n",
      " 0.380889   0.30637714 0.39829649 0.37251859 0.38454412 0.33951278\n",
      " 0.35169638 0.34596658]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 51.71 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.578125, 18.109375, 17.71875, 17.5, 15.765625, 15.390625, 15.2109375]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 12, 缓存命中: 6, 新处理: 6, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.48144672 0.39650674 0.39333689 0.45729214 0.41232915 0.4750487\n",
      " 0.41460764 0.42787816 0.43868214 0.43981174 0.40017973 0.40513551]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.48144672 0.39650674 0.39333689 0.45729214 0.41232915 0.4750487\n",
      " 0.41460764 0.42787816 0.43868214 0.43981174 0.40017973 0.40513551]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 52.25 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.59375, 18.953125, 18.390625, 16.65625, 16.625, 16.328125, 15.7734375, 15.4609375, 15.328125, 14.53125, 14.4453125, 14.3828125, 13.5234375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 52.37 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [24.734375, 22.78125, 22.671875, 21.484375, 21.125, 21.046875, 19.421875, 18.578125, 21.40625, 21.21875, 20.40625, 19.796875, 19.796875, 18.09375, 17.53125, 16.8125, 20.5625, 20.09375, 19.875, 19.59375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 52.39 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [23.15625, 20.8125, 20.453125, 20.421875, 20.15625, 19.375, 18.890625, 18.765625, 20.03125, 19.921875, 19.328125, 19.109375, 18.84375, 18.484375, 18.390625, 17.609375, 21.515625, 19.78125, 17.6875, 16.546875]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 52.31 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [19.609375, 19.40625, 15.9921875, 15.9375, 15.78125, 15.78125, 15.7109375, 15.703125, 18.625, 18.421875, 17.234375, 16.90625, 16.6875, 16.421875, 16.21875, 15.453125, 18.25, 17.40625, 16.4375, 15.5078125]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 52.30 秒\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.65625, 19.765625, 18.765625, 17.96875, 16.828125, 16.75, 16.65625, 15.875, 15.65625, 15.2890625, 14.8515625, 13.7109375, 13.5234375, 12.90625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.09 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.453125, 20.140625, 19.265625, 19.0, 18.171875, 18.125, 17.8125, 17.546875, 17.515625, 17.46875, 17.0625, 16.328125, 15.6328125, 15.4921875, 14.96875, 14.609375, 14.5625, 13.4921875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.07 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.1875, 18.46875, 18.4375, 18.421875, 18.40625, 18.0, 17.90625, 17.796875, 17.703125, 17.5625, 17.484375, 17.46875, 17.34375, 17.109375, 17.015625, 16.875, 16.875, 16.515625, 15.5390625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 52.78 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.484375, 19.1875, 18.6875, 18.609375, 18.390625, 18.296875, 18.046875, 16.84375, 16.109375, 16.03125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.14 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [23.390625, 22.59375, 22.4375, 21.53125, 21.46875, 21.203125, 21.15625, 20.140625, 22.5, 20.828125, 19.875, 19.703125, 19.171875, 17.921875, 17.078125, 16.46875, 18.984375, 18.890625, 17.671875, 17.21875]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.07 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [21.40625, 21.1875, 21.15625, 20.96875, 20.265625, 19.984375, 18.734375, 18.515625, 21.3125, 21.3125, 21.0, 20.8125, 20.609375, 20.109375, 19.578125, 19.15625, 22.5625, 20.21875, 19.125, 18.359375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.18 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [22.09375, 21.484375, 19.953125, 19.734375, 19.59375, 19.078125, 18.90625, 18.875, 23.515625, 20.125, 19.890625, 19.328125, 19.015625, 18.890625, 18.65625, 17.921875, 21.875, 18.9375, 18.84375, 17.09375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.02 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [21.890625, 20.625, 20.453125, 18.828125, 18.578125, 18.296875, 17.90625, 17.75, 19.734375, 19.109375, 18.796875, 18.578125, 18.046875, 17.796875, 17.71875, 17.515625, 19.28125, 17.984375, 17.75, 17.671875]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 52.92 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [25.265625, 25.15625, 24.640625, 23.203125, 22.703125, 22.703125, 22.453125, 21.109375, 20.484375, 20.46875, 20.15625, 20.125, 19.90625, 19.640625, 19.609375, 19.578125, 19.53125, 19.3125, 18.046875, 17.4375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.19 秒\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:research_agent.core.embedding_model:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 17, 缓存命中: 5, 新处理: 12, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.39885208 0.45810484 0.31869759 0.39955453 0.48942008 0.46459819\n",
      " 0.41084927 0.4324851  0.31392371 0.37488579 0.33079809 0.34052507\n",
      " 0.21948533 0.36594081 0.41777558 0.37444964 0.35174219]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.39885208 0.45810484 0.31869759 0.39955453 0.48942008 0.46459819\n",
      " 0.41084927 0.4324851  0.31392371 0.37488579 0.33079809 0.34052507\n",
      " 0.21948533 0.36594081 0.41777558 0.37444964 0.35174219]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 53.47 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 14\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:research_agent.core.embedding_model:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:research_agent.core.embedding_model:生成向量最终失败，存储零向量到缓存: # 6. Discussion and Conclusion\n",
      "We introduce Univer...\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.984375, 19.734375, 19.703125, 19.1875, 18.671875, 17.703125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 54.31 秒\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 17, 缓存命中: 2, 新处理: 15, 失败: 1\n",
      "WARNING:research_agent.core.embedding_model:失败的文本: ['# 6. Discussion and Conclusion\\nWe introduce Univer...']\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.33922971 0.37306773 0.42622447 0.         0.38281302 0.40485494\n",
      " 0.36587558 0.3546522  0.37327516 0.44246168 0.38105874 0.42672151\n",
      " 0.3004022  0.24424432 0.33111225 0.37791471 0.28423126]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.33922971 0.37306773 0.42622447 0.         0.38281302 0.40485494\n",
      " 0.36587558 0.3546522  0.37327516 0.44246168 0.38105874 0.42672151\n",
      " 0.3004022  0.24424432 0.33111225 0.37791471 0.28423126]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 54.16 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [20.421875, 19.9375, 18.359375, 18.21875, 17.078125, 17.046875, 16.90625, 16.546875, 18.953125, 18.84375, 18.703125, 16.75, 16.40625, 16.109375, 15.78125, 15.6640625, 17.84375, 17.65625, 16.609375, 14.9765625]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 54.31 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.203125, 20.625, 20.015625, 19.984375, 19.71875, 19.28125, 18.765625, 18.609375, 18.421875, 18.1875, 17.515625, 16.875, 16.359375, 16.28125, 16.234375, 16.1875, 15.65625, 14.9453125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 54.49 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [24.328125, 22.0625, 21.828125, 20.546875, 20.328125, 20.21875, 20.171875, 20.046875, 19.640625, 19.546875, 19.53125, 19.265625, 19.171875, 18.65625, 18.46875, 17.015625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 54.37 秒\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 15, 缓存命中: 8, 新处理: 7, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.45595933 0.45988046 0.4078935  0.44339881 0.41738868 0.43542176\n",
      " 0.45625201 0.41658407 0.459673   0.48530513 0.35997589 0.43367288\n",
      " 0.48868988 0.4637525  0.41477307]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.45595933 0.45988046 0.4078935  0.44339881 0.41738868 0.43542176\n",
      " 0.45625201 0.41658407 0.459673   0.48530513 0.35997589 0.43367288\n",
      " 0.48868988 0.4637525  0.41477307]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 54.65 秒\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 17, 缓存命中: 5, 新处理: 12, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.45185762 0.35520728 0.28055397 0.35300282 0.441299   0.37472466\n",
      " 0.37068055 0.29506755 0.40650056 0.36089264 0.41449173 0.40938402\n",
      " 0.35338122 0.29391822 0.3999125  0.3147671  0.3208084 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.45185762 0.35520728 0.28055397 0.35300282 0.441299   0.37472466\n",
      " 0.37068055 0.29506755 0.40650056 0.36089264 0.41449173 0.40938402\n",
      " 0.35338122 0.29391822 0.3999125  0.3147671  0.3208084 ]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 54.94 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [19.1875, 18.84375, 18.546875, 18.53125, 18.109375, 17.625, 16.015625, 14.78125, 19.078125, 18.078125, 17.75, 17.71875, 17.6875, 17.15625, 17.09375, 16.0, 19.125, 18.859375, 18.203125, 18.03125]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.23 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.265625, 20.234375, 19.84375, 19.359375, 19.125, 18.3125, 17.640625, 17.140625]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.32 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 19\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [20.1875, 19.0625, 18.109375, 17.953125, 16.5, 15.859375, 15.3203125, 14.6875, 17.71875, 17.0, 16.734375, 16.703125, 15.4921875, 15.46875, 15.3046875, 15.0546875, 18.921875, 18.328125, 17.4375, 16.59375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.16 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [24.0, 22.296875, 20.125, 19.703125, 19.3125, 19.296875, 19.046875, 17.859375, 17.59375, 17.328125, 16.734375, 14.078125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.31 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [24.15625, 23.390625, 21.90625, 20.8125, 18.46875, 17.890625, 17.359375, 15.1015625, 19.765625, 19.046875, 18.203125, 18.140625, 17.34375, 17.140625, 16.265625, 16.171875, 20.0625, 18.484375, 17.984375, 16.234375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.33 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.75, 19.96875, 19.09375, 18.953125, 18.65625, 18.28125, 17.953125, 17.625, 17.5, 17.4375, 17.4375, 17.375, 17.328125, 16.75, 16.390625, 16.234375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.29 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 27\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [17.3125, 16.40625, 15.2265625, 14.4921875, 13.484375, 13.4140625, 12.8671875, 12.8203125, 15.203125, 15.15625, 14.4765625, 14.265625, 13.6875, 13.3671875]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.60 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 26, 缓存命中: 7, 新处理: 19, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.36157649 0.39690327 0.39883185 0.35518343 0.37172674 0.32145459\n",
      " 0.34317557 0.40341502 0.34635732 0.31196313 0.32091194 0.34755023\n",
      " 0.35138691 0.33798416 0.35905808 0.33678666 0.31609584 0.35058167\n",
      " 0.30734299 0.33636999 0.37050036 0.32988953 0.32367966 0.30103695\n",
      " 0.27590629 0.30367931]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.36157649 0.39690327 0.39883185 0.35518343 0.37172674 0.32145459\n",
      " 0.34317557 0.40341502 0.34635732 0.31196313 0.32091194 0.34755023\n",
      " 0.35138691 0.33798416 0.35905808 0.33678666 0.31609584 0.35058167\n",
      " 0.30734299 0.33636999 0.37050036 0.32988953 0.32367966 0.30103695\n",
      " 0.27590629 0.30367931]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 55.80 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 25\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [18.25, 17.046875, 16.921875, 16.84375, 15.6484375, 15.5625, 15.0390625, 12.9140625, 17.625, 17.140625, 16.765625, 16.59375, 15.765625, 15.75, 14.6328125, 14.015625, 15.703125, 13.53125, 12.1796875]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 56.18 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [16.5, 16.296875, 15.8828125, 15.7734375, 15.03125, 14.0234375, 14.0, 12.3203125, 15.171875, 14.90625, 13.3125, 13.0546875, 12.7109375, 12.6484375, 11.9375, 11.890625, 17.4375, 16.875, 15.7578125, 15.578125, 15.421875, 14.5390625, 13.9765625, 12.4296875, 17.53125, 15.5859375, 15.203125]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 56.37 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 19\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [18.703125, 17.109375, 17.03125, 16.921875, 16.90625, 16.03125, 15.6015625, 15.5390625, 17.484375, 17.328125, 16.96875, 16.875, 16.75, 16.09375, 15.90625, 13.640625, 16.3125, 16.140625, 15.59375, 15.078125, 14.890625, 14.390625, 14.34375, 13.9765625, 13.5078125]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 57.00 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [18.671875, 18.484375, 18.4375, 17.6875, 17.421875, 17.28125, 16.40625, 15.03125, 20.21875, 18.1875, 17.984375, 16.953125, 16.78125, 16.609375, 16.3125, 15.875, 16.65625, 16.640625, 13.4765625]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 57.02 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 12\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 15, 缓存命中: 8, 新处理: 7, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.39953474 0.40195269 0.41316627 0.34201349 0.38907919 0.31621669\n",
      " 0.39657379 0.39803086 0.43144188 0.29592317 0.42121093 0.29502126\n",
      " 0.38615583 0.43173582 0.36798142]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.39953474 0.40195269 0.41316627 0.34201349 0.38907919 0.31621669\n",
      " 0.39657379 0.39803086 0.43144188 0.29592317 0.42121093 0.29502126\n",
      " 0.38615583 0.43173582 0.36798142]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 57.14 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 25\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [18.109375, 17.421875, 16.71875, 15.8515625, 15.03125, 14.953125, 14.9453125, 14.84375, 19.046875, 15.28125, 13.984375, 13.171875]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 57.66 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 23\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [17.671875, 15.8046875, 15.03125, 14.8359375, 14.828125, 14.0078125, 14.0, 13.859375, 17.703125, 17.15625, 15.53125, 14.8046875, 14.7890625, 14.0859375, 13.765625, 13.59375, 18.125, 16.0, 15.8359375, 15.421875, 15.3125, 14.828125, 14.8046875, 14.703125, 14.125]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 58.52 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.embedding_model:向量生成完成，总数: 11, 缓存命中: 10, 新处理: 1, 失败: 0\n",
      "INFO:research_agent.core.embedding_model:计算得到的余弦相似度分数: [0.40651624 0.38924128 0.41418268 0.34333598 0.45593456 0.39139227\n",
      " 0.34342654 0.38045892 0.3388932  0.33169644 0.3184981 ]\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.40651624 0.38924128 0.41418268 0.34333598 0.45593456 0.39139227\n",
      " 0.34342654 0.38045892 0.3388932  0.33169644 0.3184981 ]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 58.48 秒\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 23\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [17.453125, 16.96875, 16.15625, 15.734375, 15.578125, 15.28125, 14.2265625, 14.125, 17.703125, 16.84375, 15.734375, 14.875, 14.6640625, 14.4296875, 14.203125, 14.1328125, 15.375, 15.2578125, 14.8125, 14.7890625, 13.9765625, 13.8203125, 13.484375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 59.07 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [15.4453125, 13.9765625, 13.5390625, 12.9296875, 12.8984375, 12.59375, 11.6484375, 11.4140625, 16.75, 14.65625, 13.5625, 13.28125, 12.9765625, 12.9609375, 12.84375, 11.8125, 15.453125, 15.2421875, 13.8203125, 13.6328125, 12.4609375, 11.984375, 11.484375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 59.70 秒\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 21\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [18.703125, 18.578125, 18.3125, 17.6875, 17.515625, 17.46875, 16.3125, 15.21875, 20.140625, 18.0, 17.734375, 17.28125, 16.984375, 16.84375, 16.828125, 15.0703125, 17.921875, 17.25, 16.875, 16.609375, 13.7109375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 60.38 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.reference_processor:第二次重排序检索到的论文数量: 19\n",
      "INFO:research_agent.core.reference_processor:第二次重排序\n",
      "INFO:research_agent.core.reference_processor:第二次重排序结果: [18.71875, 17.140625, 16.734375, 16.3125, 15.7421875, 15.71875, 14.9453125, 14.671875, 17.015625, 16.796875, 16.640625, 16.328125, 15.4609375, 14.921875, 13.6015625, 8.75, 16.84375, 16.09375, 13.109375]\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 63.42 秒\n",
      "INFO:asyncio:已完成process_drafts\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# Run citation matching over all prepared drafts. `pipeline` and `logger`\n",
    "# must already exist from earlier cells (hidden-state dependency).\n",
    "results = await pipeline.process_drafts()\n",
    "logger.info(\"已完成process_drafts\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[2, 7, 4, 5, 3, 4, 2, 4, 4, 10, 7, 6, 5, 4, 4, 2, 9, 1, 3, 5, 7, 6, 4, 3, 2, 1]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Number of extracted citation statements per section.\n",
    "[len(p) for p in pipeline.statement_list]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[2, 7, 4, 5, 3, 4, 2, 4, 4, 10, 7, 6, 5, 4, 4, 2, 9, 1, 3, 5, 7, 6, 4, 3, 2, 1]"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Number of processed results per section (expected to match the\n",
    "# statement counts from pipeline.statement_list).\n",
    "[len(r) for r in results]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:asyncio:已完成update_sections\n",
      "INFO:asyncio:已完成replace_citations_with_num\n"
     ]
    }
   ],
   "source": [
    "# Write the matched citations back into the sections, convert inline\n",
    "# citations to numbered references, and save the scored draft.\n",
    "new_content = pipeline.update_sections(pipeline.merged_sections, results)\n",
    "logger.info(\"已完成update_sections\")\n",
    "\n",
    "new_content, reference_list = pipeline.replace_citations_with_num(\n",
    "    new_content)\n",
    "logger.info(\"已完成replace_citations_with_num\")\n",
    "final_survey = new_content + \"\\n\\n\" + \\\n",
    "    \"# References\\n\\n\" + \"\\n\".join(reference_list)\n",
    "\n",
    "# Save the final draft as a Markdown file\n",
    "with open(\"final_survey_score.md\", \"w\", encoding=\"utf-8\") as md_file:\n",
    "    md_file.write(final_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "from functools import partial\n",
    "\n",
    "\n",
    "# Prune low-scoring citations: each <sup>...</sup><ss>score</ss> pair in the\n",
    "# survey is rewritten by pipeline.reduce_citation using the per-citation\n",
    "# maximum scores computed first.\n",
    "max_scores = pipeline.find_citation_max_score(final_survey)\n",
    "reduce_citation_with_scores = partial(\n",
    "    pipeline.reduce_citation, max_scores=max_scores)\n",
    "final_survey = re.sub(r\"<sup>(.*?)</sup><ss>(.*?)</ss>\",\n",
    "                        reduce_citation_with_scores, final_survey)\n",
    "\n",
    "# Save the final draft as a Markdown file\n",
    "with open(\"final_survey_reduction.md\", \"w\", encoding=\"utf-8\") as md_file:\n",
    "    md_file.write(final_survey)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:asyncio:已完成update_sections\n",
      "INFO:asyncio:已完成replace_citations_with_num\n",
      "INFO:asyncio:已完成process_final_survey\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# Same merge-and-number steps as the final_survey_score.md cell, followed by\n",
    "# a final post-processing pass over the assembled survey.\n",
    "# NOTE(review): near-duplicate of that earlier cell -- consider a helper.\n",
    "new_content = pipeline.update_sections(pipeline.merged_sections, results)\n",
    "logger.info(\"已完成update_sections\")\n",
    "\n",
    "new_content, reference_list = pipeline.replace_citations_with_num(\n",
    "    new_content)\n",
    "logger.info(\"已完成replace_citations_with_num\")\n",
    "\n",
    "final_survey = new_content + \"\\n\\n\" + \\\n",
    "    \"# References\\n\\n\" + \"\\n\".join(reference_list)\n",
    "final_survey = pipeline.process_final_survey(final_survey)\n",
    "logger.info(\"已完成process_final_survey\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:asyncio:已完成保存最终草稿为Markdown文件\n"
     ]
    }
   ],
   "source": [
    "# Prepend the topic as an H1 title and persist the finished survey.\n",
    "paper_draft = f\"# {topic}\\n\\n{final_survey}\"\n",
    "# Save the final draft as a Markdown file\n",
    "with open(\"final_survey.md\", \"w\", encoding=\"utf-8\") as md_file:\n",
    "    md_file.write(paper_draft)\n",
    "logger.info(\"已完成保存最终草稿为Markdown文件\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "114"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Total number of statements across all sections.\n",
    "# NOTE(review): `statement_list` is not defined in any visible cell -- this\n",
    "# ran against stale kernel state and will fail on Restart & Run All;\n",
    "# probably should be `pipeline.statement_list`.\n",
    "sum([len(i) for i in statement_list])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[{'statement_hyde': 'The technological roadmap of multi-model large models is a critical research area shaping the future of artificial intelligence.',\n",
       "  'evidence_spans': [0, 0],\n",
       "  'keywords': ['technological roadmap',\n",
       "   'multi-model large models',\n",
       "   'artificial intelligence'],\n",
       "  'citation_needed_degree': 'High'},\n",
       " {'statement_hyde': 'The survey will draw upon a range of literature, including recent papers that have deepened our understanding of these models.',\n",
       "  'evidence_spans': [4, 4],\n",
       "  'keywords': ['survey', 'literature', 'papers', 'models', 'understanding'],\n",
       "  'citation_needed_degree': 'High'}]"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): `find_statement_citation` and `section` are not defined in\n",
    "# any visible cell -- hidden kernel state; this cell fails on a fresh\n",
    "# Restart & Run All.\n",
    "await find_statement_citation.process_section(section, topic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "26\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "def load_statement_list_from_json(file_path):\n",
    "    \"\"\"Load a previously saved statement list from a UTF-8 JSON file.\"\"\"\n",
    "    with open(file_path, 'r', encoding='utf-8') as json_file:\n",
    "        return json.load(json_file)\n",
    "\n",
    "# Usage example: restore the cached statement list instead of re-extracting\n",
    "pipeline.statement_list = load_statement_list_from_json('statement_list.json')\n",
    "print(len(pipeline.statement_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retrieve literature for every statement (network-bound).\n",
    "logger.info(\"开始检索文献\")\n",
    "await pipeline.prepare_draft_info()\n",
    "logger.info(\"已完成prepare_draft_info\")\n",
    "# takes roughly 25 s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "# Save parsed_draft_infos as a JSON cache file for later sessions\n",
    "with open(\"parsed_draft_infos.json\", \"w\", encoding=\"utf-8\") as json_file:\n",
    "    json.dump(pipeline.parsed_draft_infos, json_file, ensure_ascii=False, indent=4)"
   ]
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "# Restore parsed_draft_infos from the JSON cache written earlier\n",
    "with open(\"parsed_draft_infos.json\", \"r\", encoding=\"utf-8\") as json_file:\n",
    "    pipeline.parsed_draft_infos = json.load(json_file)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "async def process_drafts_in_batches(parsed_draft_infos, batch_size=5):\n",
    "    \"\"\"Process each draft's HYDE statements in batches of `batch_size`.\n",
    "\n",
    "    FIX: the previous version built per-batch task lists but then awaited\n",
    "    asyncio.gather over ALL tasks at once, so `batch_size` had no effect\n",
    "    on concurrency. Each batch is now awaited before the next one starts,\n",
    "    genuinely capping in-flight process_hyde calls at `batch_size`.\n",
    "\n",
    "    Relies on the notebook-global `pipeline` for the processor.\n",
    "    Returns one list of results per draft, in the original order.\n",
    "    \"\"\"\n",
    "    flat_results = []\n",
    "\n",
    "    for draft in parsed_draft_infos:\n",
    "        # Walk this draft's statements in slices of `batch_size`\n",
    "        for i in range(0, len(draft[\"hyde\"]), batch_size):\n",
    "            batch_tasks = [\n",
    "                pipeline.processor.process_hyde(\n",
    "                    hyde, evidence_index, retrieved_papers,\n",
    "                    keywords, draft[\"section\"]\n",
    "                )\n",
    "                for hyde, evidence_index, retrieved_papers, keywords in zip(\n",
    "                    draft[\"hyde\"][i:i + batch_size],\n",
    "                    draft[\"evidence_spans\"][i:i + batch_size],\n",
    "                    draft[\"retrieved_papers\"][i:i + batch_size],\n",
    "                    draft[\"keywords\"][i:i + batch_size],\n",
    "                )\n",
    "            ]\n",
    "            # Await this batch before creating the next one\n",
    "            flat_results.extend(await asyncio.gather(*batch_tasks))\n",
    "\n",
    "    # Regroup the flat result list into one sub-list per draft\n",
    "    final_output = []\n",
    "    current_index = 0\n",
    "    for draft in parsed_draft_infos:\n",
    "        count = len(draft[\"hyde\"])\n",
    "        final_output.append(flat_results[current_index:current_index + count])\n",
    "        current_index += count\n",
    "\n",
    "    return final_output\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import asyncio\n",
    "import random\n",
    "# Mock stand-in for process_hyde (no network calls)\n",
    "async def mock_process_hyde(hyde, evidence_index, retrieved_papers, keywords, section):\n",
    "    await asyncio.sleep(random.randint(2, 5))  # simulate a 2-5 s async task delay\n",
    "    return f\"Processed: {hyde}, {evidence_index}, {retrieved_papers}, {keywords}, {section}\"\n",
    "\n",
    "\n",
    "batch_size = 10  # tasks per slice (note: slicing below does not limit concurrency)\n",
    "all_tasks = []\n",
    "\n",
    "# Iterate over each draft and create its tasks\n",
    "for draft in pipeline.parsed_draft_infos:\n",
    "    task_group = []\n",
    "    \n",
    "    # Split the draft's statements into slices of batch_size\n",
    "    for i in range(0, len(draft[\"hyde\"]), batch_size):\n",
    "        batch_hydes = draft[\"hyde\"][i:i + batch_size]\n",
    "        batch_evidence_spans = draft[\"evidence_spans\"][i:i + batch_size]\n",
    "        batch_retrieved_papers = draft[\"retrieved_papers\"][i:i + batch_size]\n",
    "        batch_keywords = draft[\"keywords\"][i:i + batch_size]\n",
    "\n",
    "        # Create this slice's tasks\n",
    "        batch_tasks = [\n",
    "            mock_process_hyde(hyde, evidence_index, retrieved_papers, keywords, draft[\"section\"])\n",
    "            for hyde, evidence_index, retrieved_papers, keywords in zip(\n",
    "                batch_hydes, batch_evidence_spans, batch_retrieved_papers, batch_keywords\n",
    "            )\n",
    "        ]\n",
    "        \n",
    "        task_group.extend(batch_tasks)\n",
    "\n",
    "    all_tasks.extend(task_group)  # collect every slice's tasks into one list\n",
    "\n",
    "# Gather and print all mock results\n",
    "async def main():\n",
    "    results = await asyncio.gather(*all_tasks)\n",
    "    for res in results:\n",
    "        print(res)\n",
    "\n",
    "# Execute the test (asyncio.run works inside Jupyter only via nest_asyncio)\n",
    "asyncio.run(main())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "import asyncio\n",
    "from typing import List, Dict, Any, Tuple, Optional\n",
    "from nltk.tokenize import sent_tokenize\n",
    "import time\n",
    "from research_agent.core.embedding_model import EmbeddingModel_speed\n",
    "from research_agent.core.utils import chunking\n",
    "from research_agent.core.query import Query\n",
    "from research_agent.core.gene_hyde import GenStatementHyde\n",
    "import re\n",
    "# 配置日志\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "class ReferenceProcessor:\n",
    "    def __init__(self):\n",
    "        self.embedding_model = EmbeddingModel_speed()\n",
    "        self.gene_statement_hyde = GenStatementHyde()\n",
    "        self.query = Query()\n",
    "        # 添加缓存\n",
    "        self._rerank_cache = {}\n",
    "        self._cos_scores_cache = {}\n",
    "        self._papers_content_cache = {}\n",
    "        # 批处理大小\n",
    "        self.chunk_size = 8\n",
    "        \n",
    "    async def process_drafts(self, parsed_draft_infos: List[Dict[str, Any]]) -> List[List[Tuple[Optional[int], Optional[str]]]]:\n",
    "        \"\"\"优化后的草稿处理函数\"\"\"\n",
    "        # 预处理和分组任务\n",
    "        task_groups = []\n",
    "        for draft in parsed_draft_infos:\n",
    "            batch_tasks = []\n",
    "            for batch_start in range(0, len(draft[\"hyde\"]), self.chunk_size):\n",
    "                batch_end = min(batch_start + self.chunk_size, len(draft[\"hyde\"]))\n",
    "                batch = {\n",
    "                    \"hyde\": draft[\"hyde\"][batch_start:batch_end],\n",
    "                    \"original_statements\": draft[\"original_statements\"][batch_start:batch_end],\n",
    "                    \"evidence_spans\": draft[\"evidence_spans\"][batch_start:batch_end],\n",
    "                    \"retrieved_papers\": draft[\"retrieved_papers\"][batch_start:batch_end],\n",
    "                    \"keywords\": draft[\"keywords\"][batch_start:batch_end],\n",
    "                    \"section\": draft[\"section\"]\n",
    "                }\n",
    "                batch_tasks.append(self._process_batch(batch))\n",
    "            task_groups.append(batch_tasks)\n",
    "\n",
    "        # 并行执行每组任务\n",
    "        results = []\n",
    "        for group in task_groups:\n",
    "            group_results = await asyncio.gather(*group)\n",
    "            # 展平批处理结果\n",
    "            flat_results = [item for sublist in group_results for item in sublist]\n",
    "            results.append(flat_results)\n",
    "        \n",
    "        return results\n",
    "\n",
    "    async def _process_batch(self, batch: Dict[str, Any]) -> List[Tuple[Optional[int], Optional[str]]]:\n",
    "        \"\"\"批量处理HYDE语句\"\"\"\n",
    "        async def process_single(args):\n",
    "            hyde, orig_stmt, evidence_idx, papers, keywords = args\n",
    "            return await self._process_hyde_with_cache(\n",
    "                hyde, orig_stmt, evidence_idx, papers, keywords, batch[\"section\"]\n",
    "            )\n",
    "\n",
    "        # 并行处理批次中的所有项\n",
    "        tasks = [\n",
    "            process_single((h, o, e, p, k))\n",
    "            for h, o, e, p, k in zip(\n",
    "                batch[\"hyde\"], batch[\"original_statements\"],\n",
    "                batch[\"evidence_spans\"], batch[\"retrieved_papers\"],\n",
    "                batch[\"keywords\"]\n",
    "            )\n",
    "        ]\n",
    "        return await asyncio.gather(*tasks)\n",
    "\n",
    "    async def _process_hyde_with_cache(self, hyde: str, original_statement: str,\n",
    "                                     evidence_index: List[int], retrieved_papers: List[Dict[str, Any]],\n",
    "                                     keywords: str, merge_section: str) -> Tuple[Optional[int], Optional[str]]:\n",
    "        \"\"\"使用缓存的HYDE处理函数\"\"\"\n",
    "        cache_key = (hyde, original_statement, tuple(sorted(p['id'] for p in retrieved_papers)))\n",
    "        if cache_key in self._rerank_cache:\n",
    "            return self._rerank_cache[cache_key]\n",
    "\n",
    "        try:\n",
    "            # 获取或计算papers_content（使用缓存）\n",
    "            papers_content = await self._get_cached_papers_content(retrieved_papers)\n",
    "            \n",
    "            # 获取或计算cos_scores（使用缓存）\n",
    "            cos_scores = await self._get_cached_cos_scores(original_statement, papers_content)\n",
    "            \n",
    "            if any(score > self.cos_threshold for score in cos_scores):\n",
    "                result = await self._handle_high_cos_scores(\n",
    "                    original_statement, retrieved_papers, cos_scores, keywords, hyde\n",
    "                )\n",
    "            else:\n",
    "                result = await self._handle_low_cos_scores(\n",
    "                    hyde, keywords, retrieved_papers\n",
    "                )\n",
    "            \n",
    "            # 缓存结果\n",
    "            self._rerank_cache[cache_key] = result\n",
    "            return result\n",
    "            \n",
    "        except Exception as e:\n",
    "            logger.error(f\"处理HYDE时发生错误: {str(e)}\")\n",
    "            return None, None\n",
    "\n",
    "    async def _get_cached_papers_content(self, papers: List[Dict[str, Any]]) -> List[str]:\n",
    "        \"\"\"获取缓存的论文内容\"\"\"\n",
    "        cache_key = tuple(p['id'] for p in papers)\n",
    "        if cache_key not in self._papers_content_cache:\n",
    "            content = [p[\"entity\"][\"chunk_text\"] for p in papers]\n",
    "            self._papers_content_cache[cache_key] = await asyncio.to_thread(chunking, content)\n",
    "        return self._papers_content_cache[cache_key]\n",
    "\n",
    "    async def _get_cached_cos_scores(self, statement: str, papers_content: List[str]) -> List[float]:\n",
    "        \"\"\"获取缓存的余弦相似度分数\"\"\"\n",
    "        cache_key = (statement, tuple(papers_content))\n",
    "        if cache_key not in self._cos_scores_cache:\n",
    "            self._cos_scores_cache[cache_key] = await self.embedding_model.get_cos_scores(\n",
    "                statement, papers_content\n",
    "            )\n",
    "        return self._cos_scores_cache[cache_key]\n",
    "\n",
    "    async def _handle_high_cos_scores(self, statement: str, papers: List[Dict[str, Any]],\n",
    "                                    cos_scores: List[float], keywords: str, hyde: str) -> str:\n",
    "        \"\"\"处理高余弦相似度的情况\"\"\"\n",
    "        try:\n",
    "            rerank_papers = [p for i, p in enumerate(papers) if cos_scores[i] > self.cos_threshold]\n",
    "            rerank_content = await self._get_cached_papers_content(rerank_papers)\n",
    "            \n",
    "            # 批量处理重排序\n",
    "            results = await self._batch_rerank(statement, rerank_content, rerank_papers)\n",
    "            return self._format_reference_info(results, rerank_papers)\n",
    "            \n",
    "        except Exception as e:\n",
    "            logger.error(f\"处理高相似度分数时发生错误: {str(e)}\")\n",
    "            return \"\"\n",
    "\n",
    "    async def _batch_rerank(self, query: str, documents: List[str],\n",
    "                           papers: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n",
    "        \"\"\"Rerank `documents` against `query` in batches of self.chunk_size.\n",
    "\n",
    "        FIX: the previous version returned one raw API response per *batch*,\n",
    "        while the caller zips the return value with the papers list.  Results\n",
    "        are now re-aligned so element i describes documents[i] (and therefore\n",
    "        papers[i]); documents with no result — e.g. from a failed batch —\n",
    "        carry a -inf relevance_score placeholder so threshold checks skip them.\n",
    "        \"\"\"\n",
    "        batches = [documents[i:i + self.chunk_size]\n",
    "                   for i in range(0, len(documents), self.chunk_size)]\n",
    "\n",
    "        tasks = [\n",
    "            self.embedding_model.rerank_documents(query=query, documents=batch)\n",
    "            for batch in batches\n",
    "        ]\n",
    "        responses = await asyncio.gather(*tasks)\n",
    "\n",
    "        # Map each per-batch result (batch-local 'index') back to its global position\n",
    "        aligned = [{\"relevance_score\": float(\"-inf\"), \"index\": i}\n",
    "                   for i in range(len(documents))]\n",
    "        for batch_no, response in enumerate(responses):\n",
    "            if not response:\n",
    "                continue  # rerank_documents returns None when a request fails\n",
    "            offset = batch_no * self.chunk_size\n",
    "            for item in response.get(\"results\", []):\n",
    "                pos = offset + item[\"index\"]\n",
    "                if 0 <= pos < len(aligned):\n",
    "                    aligned[pos] = item\n",
    "        return aligned\n",
    "\n",
    "    def _format_reference_info(self, rerank_results: List[Dict[str, Any]],\n",
    "                             papers: List[Dict[str, Any]]) -> str:\n",
    "        \"\"\"Concatenate <sup>...</sup><ss>score</ss> citation tags for every paper\n",
    "        whose rerank relevance_score exceeds self.rerank_threshold.\n",
    "\n",
    "        NOTE(review): zip(rerank_results, papers) requires rerank_results to be\n",
    "        aligned 1:1 with `papers`, in paper order — verify that the producer\n",
    "        (_batch_rerank) actually provides per-document results in that order.\n",
    "        \"\"\"\n",
    "        reference_info = []\n",
    "        for result, paper in zip(rerank_results, papers):\n",
    "            if result[\"relevance_score\"] > self.rerank_threshold:\n",
    "                # Strip any pre-existing <sup> markup embedded in the stored title\n",
    "                reference_info.append(\n",
    "                    f\"<sup>{re.sub(r'<sup>.*?</sup>', '', paper['entity']['paper_title'])}.\"\n",
    "                    f\"{paper['entity']['original_filename']},\"\n",
    "                    f\"{paper['entity']['year']}, \"\n",
    "                    f\"chunk {paper['entity']['chunk_id']}</sup>\"\n",
    "                    f\"<ss>{result['relevance_score']}</ss>\"\n",
    "                )\n",
    "        return \"\".join(reference_info)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'The survey will draw upon a range of literature, including recent papers that have deepened our understanding of these models.'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Pull one statement's artifacts out of the first parsed draft for manual inspection\n",
    "start_idx = 0\n",
    "idx = 1\n",
    "draft_info = pipeline.parsed_draft_infos[start_idx]\n",
    "hyde = draft_info[\"hyde\"][idx]\n",
    "original_statement = draft_info[\"original_statement\"][idx]\n",
    "retrieved_papers = draft_info[\"retrieved_papers\"][idx]\n",
    "keywords = draft_info[\"keywords\"][idx]\n",
    "merge_section = draft_info[\"section\"]\n",
    "evidence_index = draft_info[\"evidence_spans\"][idx]\n",
    "original_statement"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "import asyncio\n",
    "from typing import List, Dict, Tuple, Union\n",
    "import numpy as np\n",
    "from zhipuai import ZhipuAI\n",
    "from research_agent.core.config import Config\n",
    "import aiohttp\n",
    "from research_agent.core.utils import chunking\n",
    "import random\n",
    "\n",
    "# 配置日志\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "class EmbeddingModel_speed:\n",
    "    \"\"\"ZhipuAI embedding / rerank client with an in-memory embedding cache.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        \"\"\"Initialize the API client, the embedding cache and batching parameters.\"\"\"\n",
    "        self.configs = Config()\n",
    "        self.client = ZhipuAI(api_key=self.configs.API_KEY)\n",
    "        # Cache maps hash(text) -> np.ndarray embedding; zero vectors mark failures\n",
    "        self._embedding_cache = {}\n",
    "        self.max_cache_size = 10000\n",
    "        self.batch_size = 5  # texts per embeddings API request\n",
    "        self.embedding_dim = 2048  # embedding vector dimension\n",
    "\n",
    "    async def get_embeddings(self, texts: Union[str, List[str]]) -> Tuple[List[np.ndarray], List[int]]:\n",
    "        \"\"\"Embed a batch of texts.\n",
    "\n",
    "        Returns:\n",
    "            (embeddings in input order, indices whose embedding failed and was\n",
    "            replaced by a zero vector).\n",
    "        \"\"\"\n",
    "        if isinstance(texts, str):\n",
    "            texts = [texts]\n",
    "            \n",
    "        results = [None] * len(texts)\n",
    "        texts_to_process = []\n",
    "        process_indices = []\n",
    "\n",
    "        # Serve cache hits first; queue the rest for API calls\n",
    "        for idx, text in enumerate(texts):\n",
    "            cache_key = hash(text)\n",
    "            if cache_key in self._embedding_cache:\n",
    "                results[idx] = self._embedding_cache[cache_key]\n",
    "            else:\n",
    "                texts_to_process.append(text)\n",
    "                process_indices.append(idx)\n",
    "\n",
    "        if texts_to_process:\n",
    "            # Call the embeddings API in batches of self.batch_size\n",
    "            for i in range(0, len(texts_to_process), self.batch_size):\n",
    "                batch_texts = texts_to_process[i:i + self.batch_size]\n",
    "                batch_indices = process_indices[i:i + self.batch_size]\n",
    "\n",
    "                try:\n",
    "                    response = await asyncio.to_thread(\n",
    "                        self.client.embeddings.create,\n",
    "                        model=self.configs.EMBEDDING_MODEL,\n",
    "                        input=batch_texts\n",
    "                    )\n",
    "                    \n",
    "                    # Store each embedding at its original input position\n",
    "                    for j, (data, idx) in enumerate(zip(response.data, batch_indices)):\n",
    "                        embedding = np.array(data.embedding)\n",
    "                        cache_key = hash(texts_to_process[i + j])\n",
    "                        self._embedding_cache[cache_key] = embedding\n",
    "                        results[idx] = embedding\n",
    "                        \n",
    "                except Exception as e:\n",
    "                    logger.error(f\"批处理生成向量失败: {e}\")\n",
    "                    # Batch call failed: fall back to per-text requests (with retry)\n",
    "                    async def process_single_text(text: str, idx: int):\n",
    "                        try:\n",
    "                            embedding = await self.generate_embedding(text)\n",
    "                            results[idx] = embedding\n",
    "                        except Exception as e:\n",
    "                            logger.error(f\"单个文本向量生成失败: {text[:50]}... 错误: {e}\")\n",
    "                            zero_vector = np.zeros(self.embedding_dim)\n",
    "                            cache_key = hash(text)\n",
    "                            self._embedding_cache[cache_key] = zero_vector\n",
    "                            results[idx] = zero_vector\n",
    "                    \n",
    "                    tasks = [\n",
    "                        process_single_text(text, idx)\n",
    "                        for text, idx in zip(batch_texts, batch_indices)\n",
    "                    ]\n",
    "                    # Run all fallback requests concurrently\n",
    "                    await asyncio.gather(*tasks)\n",
    "\n",
    "        # Evict part of the cache if it grew past the limit\n",
    "        await self._manage_cache_size()\n",
    "\n",
    "        # Report indices that ended up missing or zeroed out\n",
    "        zero_vector = np.zeros(self.embedding_dim)\n",
    "        failed_ids = [i for i, res in enumerate(results) \n",
    "                     if res is None or np.array_equal(res, zero_vector)]\n",
    "\n",
    "        logger.info(f\"向量生成完成，总数: {len(texts)}, 缓存命中: {len(texts) - len(texts_to_process)}, \"\n",
    "                   f\"新处理: {len(texts_to_process)}, 失败: {len(failed_ids)}\")\n",
    "        if failed_ids:\n",
    "            logger.warning(f\"失败的文本: {[texts[i][:50] + '...' for i in failed_ids]}\")\n",
    "\n",
    "        return results, failed_ids\n",
    "\n",
    "    async def get_embedding(self, text: str) -> np.ndarray:\n",
    "        \"\"\"Return the embedding of a single text, using the cache when possible.\n",
    "\n",
    "        On API failure a zero vector is cached and returned instead of raising.\n",
    "        \"\"\"\n",
    "        cache_key = hash(text)\n",
    "        if cache_key in self._embedding_cache:\n",
    "            return self._embedding_cache[cache_key]\n",
    "\n",
    "        try:\n",
    "            # Cache miss: request a fresh embedding off the event loop\n",
    "            response = await asyncio.to_thread(\n",
    "                self.client.embeddings.create,\n",
    "                model=self.configs.EMBEDDING_MODEL,\n",
    "                input=text\n",
    "            )\n",
    "            embedding = np.array(response.data[0].embedding)\n",
    "            \n",
    "            self._embedding_cache[cache_key] = embedding\n",
    "            return embedding\n",
    "            \n",
    "        except Exception as e:\n",
    "            logger.error(f\"生成向量失败: {e}\")\n",
    "            # On failure cache and return a zero vector\n",
    "            zero_vector = np.zeros(self.embedding_dim)\n",
    "            self._embedding_cache[cache_key] = zero_vector\n",
    "            return zero_vector\n",
    "\n",
    "    async def generate_embedding(self, text: str) -> np.ndarray:\n",
    "        \"\"\"Generate an embedding with retries, shortening the text between tries.\n",
    "\n",
    "        After max_retries failures a zero vector is cached (under the original\n",
    "        text's key) and returned.\n",
    "        \"\"\"\n",
    "        cache_key = hash(text)\n",
    "        if cache_key in self._embedding_cache:\n",
    "            return self._embedding_cache[cache_key]\n",
    "\n",
    "        max_retries = 2\n",
    "        retry_count = 0\n",
    "        reduce_length = 0\n",
    "\n",
    "        while retry_count < max_retries:\n",
    "            try:\n",
    "                response = await asyncio.to_thread(\n",
    "                    self.client.embeddings.create,\n",
    "                    model=self.configs.EMBEDDING_MODEL,\n",
    "                    input=text\n",
    "                )\n",
    "                embedding = np.array(response.data[0].embedding)\n",
    "                self._embedding_cache[cache_key] = embedding\n",
    "                return embedding\n",
    "            except Exception as e:\n",
    "                logger.warning(f\"生成向量失败 (第 {retry_count+1} 次): {e}\")\n",
    "                retry_count += 1\n",
    "\n",
    "                # Re-chunk the text shorter before the next attempt.\n",
    "                # NOTE(review): the first retry passes reduce_length=0 —\n",
    "                # confirm chunking() still shortens the text in that case.\n",
    "                papers_content = await asyncio.get_running_loop().run_in_executor(\n",
    "                    None, chunking, [text], reduce_length\n",
    "                )\n",
    "                reduce_length += 100 * retry_count\n",
    "                text = '\\n'.join(papers_content)\n",
    "\n",
    "        # Retries exhausted: cache and return a zero vector\n",
    "        logger.error(f\"生成向量最终失败，存储零向量到缓存: {text[:50]}...\")\n",
    "        zero_vector = np.zeros(self.embedding_dim)\n",
    "        self._embedding_cache[cache_key] = zero_vector\n",
    "        return zero_vector\n",
    "\n",
    "    async def get_cos_scores(self, statement: str, papers_content: List[str]) -> np.ndarray:\n",
    "        \"\"\"Cosine similarity of `statement` against each text in `papers_content`.\"\"\"\n",
    "        doc_embeddings, failed_embeding_ids = await self.get_embeddings(papers_content)\n",
    "\n",
    "        query_embedding = await self.get_embedding(statement)\n",
    "        \n",
    "        # Normalize inputs to numpy arrays before computing similarities\n",
    "        query_embedding = np.array(query_embedding)\n",
    "        doc_embeddings = [np.array(de) for de in doc_embeddings]\n",
    "        \n",
    "        scores = np.array([self.cosine_similarity(query_embedding, de) for de in doc_embeddings])\n",
    "        \n",
    "        logger.info(f\"计算得到的余弦相似度分数: {scores}\")\n",
    "        \n",
    "        return scores\n",
    "\n",
    "    def cosine_similarity(self, vec_a: np.ndarray, vec_b: np.ndarray) -> float:\n",
    "        \"\"\"Cosine similarity of two vectors; 0.0 when either vector is all zeros.\"\"\"\n",
    "        # Zero vectors mark failed embeddings — treat them as unrelated\n",
    "        if np.all(vec_a == 0) or np.all(vec_b == 0):\n",
    "            return 0.0\n",
    "            \n",
    "        dot_product = np.dot(vec_a, vec_b)\n",
    "        norm_a = np.linalg.norm(vec_a)\n",
    "        norm_b = np.linalg.norm(vec_b)\n",
    "        \n",
    "        # Guard against division by zero\n",
    "        if norm_a == 0 or norm_b == 0:\n",
    "            return 0.0\n",
    "            \n",
    "        return dot_product / (norm_a * norm_b)\n",
    "\n",
    "    async def rerank_documents(self, query: str, documents: List[str], top_n=0, return_docs=True, return_raw=True):\n",
    "        \"\"\"Call the rerank HTTP API asynchronously.\n",
    "\n",
    "        Returns:\n",
    "            the parsed JSON response, or None when the request fails.\n",
    "        \"\"\"\n",
    "        url = \"https://open.bigmodel.cn/api/paas/v4/rerank\"\n",
    "        # SECURITY FIX: use the configured API key (same credential source as the\n",
    "        # ZhipuAI client in __init__) instead of a hardcoded secret\n",
    "        headers = {\n",
    "            \"Authorization\": f\"Bearer {self.configs.API_KEY}\",\n",
    "            \"Content-Type\": \"application/json\"\n",
    "        }\n",
    "        payload = {\n",
    "            \"model\": \"rerank\",\n",
    "            \"query\": query,\n",
    "            \"documents\": documents,\n",
    "            \"top_n\": top_n,\n",
    "            \"return_documents\": return_docs,\n",
    "            \"return_raw_scores\": return_raw\n",
    "        }\n",
    "\n",
    "        async with aiohttp.ClientSession() as session:\n",
    "            try:\n",
    "                async with session.post(url, headers=headers, json=payload) as response:\n",
    "                    result = await response.json()\n",
    "                    if \"error\" in result:\n",
    "                        raise Exception(f\"API Error: {result['error']}\")\n",
    "                    return result\n",
    "            except Exception as e:\n",
    "                logger.error(f\"Rerank请求失败: {e}\")\n",
    "                return None\n",
    "\n",
    "    async def _manage_cache_size(self):\n",
    "        \"\"\"Evict a random 10% of cached embeddings once the cache is full.\"\"\"\n",
    "        if len(self._embedding_cache) >= self.max_cache_size:\n",
    "            remove_keys = random.sample(\n",
    "                list(self._embedding_cache.keys()), \n",
    "                int(self.max_cache_size * 0.1)\n",
    "            )\n",
    "            for key in remove_keys:\n",
    "                del self._embedding_cache[key]\n",
    "            logger.info(f\"清理缓存 {len(remove_keys)} 项\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): variable name 'embeding_model' is misspelled ('embedding');\n",
    "# kept as-is so any cells referring to it keep working\n",
    "embeding_model = EmbeddingModel_speed()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {},
   "outputs": [],
   "source": [
    "import logging\n",
    "import asyncio\n",
    "from typing import List, Dict, Any, Tuple, Optional\n",
    "from nltk.tokenize import sent_tokenize\n",
    "import time\n",
    "# from research_agent.core.embedding_model import EmbeddingModel_speed\n",
    "from research_agent.core.utils import chunking\n",
    "from research_agent.core.query import Query\n",
    "from research_agent.core.gene_hyde import GenStatementHyde\n",
    "import re\n",
    "# 配置日志\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "\n",
    "class ReferenceProcessor:\n",
    "    \"\"\"Main class for attaching literature citations to draft sentences.\n",
    "\n",
    "    Combines embedding similarity, rerank calls and HYDE regeneration to find\n",
    "    supporting papers for each statement and append <sup>…</sup> citation tags.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, concurrency: int = 30):\n",
    "        \"\"\"\n",
    "        Initialize the processor.\n",
    "\n",
    "        Args:\n",
    "            concurrency: maximum number of concurrent tasks.\n",
    "                NOTE(review): accepted for backward compatibility but not\n",
    "                currently used anywhere in this class.\n",
    "        \"\"\"\n",
    "        self.embedding_model = EmbeddingModel_speed()\n",
    "        self.gene_statement_hyde = GenStatementHyde()\n",
    "        self.query = Query()\n",
    "\n",
    "    async def _update_sentence(self, sentences: List[str], evidence_index: List[int], reference_info: str) -> Tuple[Optional[int], Optional[str]]:\n",
    "        \"\"\"\n",
    "        Append the citation string to the last evidence sentence.\n",
    "\n",
    "        Args:\n",
    "            sentences: tokenized sentences of the section\n",
    "            evidence_index: indices of the evidence sentences\n",
    "            reference_info: formatted citation string to append\n",
    "\n",
    "        Returns:\n",
    "            (index of the updated sentence, its new content), or (None, None)\n",
    "            when there is no evidence index.\n",
    "        \"\"\"\n",
    "        if evidence_index:\n",
    "            target_index = max(evidence_index)\n",
    "            sentences[target_index] += reference_info\n",
    "            return target_index, sentences[target_index]\n",
    "        return None, None\n",
    "\n",
    "    async def _process_papers_content(self, retrieved_paper_list: List[Dict[str, Any]]) -> List[str]:\n",
    "        \"\"\"\n",
    "        Extract and chunk the text of retrieved papers off the event loop.\n",
    "\n",
    "        Args:\n",
    "            retrieved_paper_list: retrieved paper records\n",
    "\n",
    "        Returns:\n",
    "            chunked paper texts, aligned with the input list\n",
    "        \"\"\"\n",
    "        loop = asyncio.get_running_loop()\n",
    "        papers_content = await loop.run_in_executor(\n",
    "            None,\n",
    "            lambda: [r[\"entity\"][\"chunk_text\"] for r in retrieved_paper_list]\n",
    "        )\n",
    "        return await loop.run_in_executor(None, chunking, papers_content)\n",
    "\n",
    "    async def _handle_rerank_failure(self, statement: str, rerank_papers_content: List[str],\n",
    "                                     rerank_papers: List[Dict[str, Any]], rerank_threshold: float) -> str:\n",
    "        \"\"\"\n",
    "        Fallback path: rerank the papers in fixed-size chunks and format citations.\n",
    "\n",
    "        Args:\n",
    "            statement: query statement\n",
    "            rerank_papers_content: paper texts\n",
    "            rerank_papers: paper records aligned with rerank_papers_content\n",
    "            rerank_threshold: minimum relevance score to cite a paper\n",
    "\n",
    "        Returns:\n",
    "            concatenated citation string (may be empty)\n",
    "        \"\"\"\n",
    "        chunk_size = 8  # documents per rerank request\n",
    "        sub_rerank_retrieved_paper_list = [\n",
    "            rerank_papers[i:i + chunk_size] for i in range(0, len(rerank_papers), chunk_size)\n",
    "        ]\n",
    "        sub_rerank_retrieved_paper_content = [\n",
    "            chunking(rerank_papers_content[i:i + chunk_size]) for i in range(0, len(rerank_papers_content), chunk_size)\n",
    "        ]\n",
    "        # Fire one rerank request per chunk, in parallel\n",
    "        rerank_tasks = [self.embedding_model.rerank_documents(\n",
    "            query=statement, documents=sublist) for sublist in sub_rerank_retrieved_paper_content]\n",
    "        rerank_results = await asyncio.gather(*rerank_tasks)\n",
    "        # FIX: rerank_documents returns None on request failure — filter before\n",
    "        # logging; the previous comprehension raised TypeError on a None entry\n",
    "        valid_results = [r for r in rerank_results if r]\n",
    "        logger.info(\n",
    "            f\"第二次重排序结果: {[result['relevance_score'] for rerank_result in valid_results for result in rerank_result['results']]}\")\n",
    "        reference_info = \"\"\n",
    "        for rerank_result, papers_list in zip(rerank_results, sub_rerank_retrieved_paper_list):\n",
    "            if not rerank_result:\n",
    "                continue\n",
    "            for result in rerank_result[\"results\"]:\n",
    "                if result[\"relevance_score\"] > rerank_threshold:\n",
    "                    # Delegate to the shared formatter (this logic was duplicated inline)\n",
    "                    reference_paper = papers_list[result[\"index\"]]\n",
    "                    reference_info += (self._format_reference(reference_paper)\n",
    "                                       + f\"<ss>{result['relevance_score']}</ss>\")\n",
    "        return reference_info\n",
    "\n",
    "    def _format_reference(self, reference_paper: Dict[str, Any]) -> str:\n",
    "        \"\"\"\n",
    "        Format one paper record as a <sup>…</sup> citation tag.\n",
    "\n",
    "        Args:\n",
    "            reference_paper: paper record with an 'entity' payload\n",
    "\n",
    "        Returns:\n",
    "            formatted citation string\n",
    "        \"\"\"\n",
    "        # Strip any pre-existing <sup> markup from the stored title\n",
    "        paper_title = re.sub(\n",
    "            r'<sup>.*?</sup>', '', reference_paper['entity']['paper_title'])\n",
    "        # Extract the readable name from 'Data_<name>_with…' filenames.\n",
    "        # NOTE(review): raises IndexError when the filename does not match.\n",
    "        original_filename = re.findall(\n",
    "            r\"Data_+(.*?)_with\", reference_paper['entity']['original_filename'])[0]\n",
    "        original_filename = re.sub(r\"[\\d]\", \"\", original_filename)\n",
    "        original_filename = re.sub(r\"[_]\", \" \", original_filename)\n",
    "        year = reference_paper['entity']['year']\n",
    "        chunk_id = reference_paper['entity']['chunk_id']\n",
    "        return (f\"<sup>{paper_title} \"\n",
    "                f\"{original_filename},\"\n",
    "                f\"{year}, \"\n",
    "                f\"chunk {chunk_id}</sup>\")\n",
    "    \n",
    "    async def process_hyde(self, hyde: str, original_statement: str, evidence_index: List[int],\n",
    "                           retrieved_papers: List[Dict[str, Any]], keywords: str,\n",
    "                           merge_section: str, cos_threshold: float = 0.5,\n",
    "                           rerank_threshold: float = 19) -> Tuple[Optional[int], Optional[str]]:\n",
    "        \"\"\"\n",
    "        Process a single HYDE statement and attach citations to its section.\n",
    "\n",
    "        Args:\n",
    "            hyde: HYDE statement used for retrieval\n",
    "            original_statement: the sentence being cited\n",
    "            evidence_index: indices of the evidence sentences in the section\n",
    "            retrieved_papers: initially retrieved candidate papers\n",
    "            keywords: keywords for HYDE regeneration\n",
    "            merge_section: full section text containing the statement\n",
    "            cos_threshold: cosine-similarity gate for rerank candidates\n",
    "            rerank_threshold: minimum rerank relevance score for a citation\n",
    "\n",
    "        Returns:\n",
    "            (index of the updated sentence, updated sentence) or (None, None)\n",
    "        \"\"\"\n",
    "        loop = asyncio.get_running_loop()\n",
    "        logger.info(\"开始处理 HYDE: %s\", hyde)\n",
    "        start_time = time.time()\n",
    "\n",
    "        sentences = await loop.run_in_executor(None, sent_tokenize, merge_section)\n",
    "        failed_papers_id = []\n",
    "        reference_info = \"\"\n",
    "\n",
    "        for attempt in range(2):\n",
    "            logger.info(\"尝试次数: %d\", attempt + 1)\n",
    "            papers_content = await self._process_papers_content(retrieved_papers)\n",
    "\n",
    "            cos_scores = await self.embedding_model.get_cos_scores(original_statement, papers_content)\n",
    "            logger.info(\"计算的余弦分数: %s\", cos_scores)\n",
    "            if attempt+1 == 2 and all(score < cos_threshold for score in cos_scores):\n",
    "                # Last attempt and nothing similar enough: give up without citations.\n",
    "                # FIX: was print(\"尝试次数: %d\", attempt + 1) — print() does not\n",
    "                # interpolate %-style arguments; use the logger instead.\n",
    "                logger.info(\"尝试次数: %d\", attempt + 1)\n",
    "                break\n",
    "            if any(score > cos_threshold for score in cos_scores):\n",
    "                try:\n",
    "                    logger.info(\"开始重排序\")\n",
    "                    rerank_papers = [y for x, y in enumerate(\n",
    "                        retrieved_papers) if cos_scores[x] > cos_threshold]\n",
    "                    rerank_papers_content = [\n",
    "                        x[\"entity\"][\"chunk_text\"] for x in rerank_papers]\n",
    "                    rerank_results = await self.embedding_model.rerank_documents(\n",
    "                        query=original_statement, documents=rerank_papers_content)\n",
    "                    if rerank_results is None:\n",
    "                        # API failure: route explicitly to the chunked-rerank\n",
    "                        # fallback in the except block (previously this crashed\n",
    "                        # with TypeError further down and relied on the catch-all)\n",
    "                        raise RuntimeError(\"rerank_documents returned None\")\n",
    "                    if any(result[\"relevance_score\"] > rerank_threshold for result in rerank_results[\"results\"]):\n",
    "                        logger.info(\"第一次重排序结果: %s\", [\n",
    "                                    result[\"relevance_score\"] for result in rerank_results[\"results\"]])\n",
    "                        reference_info = self._process_rerank_results(\n",
    "                            rerank_results, rerank_papers, rerank_threshold)\n",
    "                        logger.info(\"第一次重排序成功\")\n",
    "                        break\n",
    "                    if all(result[\"relevance_score\"] < rerank_threshold for result in rerank_results[\"results\"]):\n",
    "                        logger.info(\"第一次重排序结果: %s\", [\n",
    "                                    result[\"relevance_score\"] for result in rerank_results[\"results\"]])\n",
    "                        failed_papers_id += [r[\"id\"] for r in retrieved_papers]\n",
    "                        logger.warning(\n",
    "                            \"第一次重排序分数均小于 %f，重新生成 HYDE。\", rerank_threshold)\n",
    "                        new_hyde, core_questions = await self.gene_statement_hyde.generate_statement_hyde(statements=hyde, keywords=keywords)\n",
    "                        # Reuse the shared retrieve + dedup + filter helper instead\n",
    "                        # of duplicating its logic inline (was ~15 copied lines)\n",
    "                        rerank_retrieved_paper_list = await self._retrieve_new_papers(\n",
    "                            new_hyde, core_questions, failed_papers_id)\n",
    "                        logger.info(\n",
    "                            f\"第二次重排序检索到的论文数量: {len(rerank_retrieved_paper_list)}\")\n",
    "                        rerank_papers_content = [\n",
    "                            x[\"entity\"][\"chunk_text\"] for x in rerank_retrieved_paper_list]\n",
    "                        rerank_papers_content = await loop.run_in_executor(None, chunking, rerank_papers_content)\n",
    "                        logger.info(\"第二次重排序\")\n",
    "                        reference_info = await self._handle_rerank_failure(\n",
    "                            statement=original_statement, rerank_papers_content=rerank_papers_content, rerank_papers=rerank_retrieved_paper_list, rerank_threshold=rerank_threshold)\n",
    "                        break\n",
    "                except Exception as e:\n",
    "                    logger.error(\"重排序失败: %s\", e)\n",
    "                    reference_info = await self._handle_rerank_failure(\n",
    "                        hyde, papers_content, retrieved_papers, rerank_threshold)\n",
    "                    break\n",
    "            \n",
    "            # Similarity too low: regenerate the HYDE and retrieve a fresh paper set\n",
    "            failed_papers_id.extend(paper[\"id\"] for paper in retrieved_papers)\n",
    "            new_hyde, core_questions = await self.gene_statement_hyde.generate_statement_hyde(\n",
    "                statements=hyde, keywords=keywords)\n",
    "\n",
    "            retrieved_papers = await self._retrieve_new_papers(\n",
    "                new_hyde, core_questions, failed_papers_id)\n",
    "            hyde = new_hyde\n",
    "\n",
    "        logger.info(\"处理 HYDE 完成，耗时: %.2f 秒\", time.time() - start_time)\n",
    "        return await self._update_sentence(sentences, evidence_index, reference_info)\n",
    "\n",
    "    async def _retrieve_new_papers(self, hyde: str, core_questions: List[str],\n",
    "                                   failed_papers_id: List[str]) -> List[Dict[str, Any]]:\n",
    "        \"\"\"\n",
    "        Retrieve papers for the HYDE and its core questions, dedup by id and\n",
    "        drop papers that already failed reranking.\n",
    "\n",
    "        Args:\n",
    "            hyde: HYDE statement\n",
    "            core_questions: core question list\n",
    "            failed_papers_id: ids of papers already rejected\n",
    "\n",
    "        Returns:\n",
    "            deduplicated, filtered paper records\n",
    "        \"\"\"\n",
    "        queries = [hyde] + core_questions\n",
    "        tasks = [self.query.query_by_content(q, top_k=10) for q in queries]\n",
    "        results = await asyncio.gather(*tasks)\n",
    "        cos_unique_ids = set()\n",
    "        cos_retrieved_paper_set_list = []\n",
    "        # Flatten the nested result lists while deduplicating by paper id\n",
    "        for sublist in results:\n",
    "            for item in sublist:\n",
    "                if item[\"id\"] not in cos_unique_ids:\n",
    "                    cos_unique_ids.add(item[\"id\"])\n",
    "                    cos_retrieved_paper_set_list.append(item)\n",
    "        # Filter out papers that already failed in earlier attempts\n",
    "        return [paper for paper in cos_retrieved_paper_set_list if paper[\"id\"] not in failed_papers_id]\n",
    "\n",
    "    def _process_rerank_results(self, rerank_results: Dict[str, Any],\n",
    "                                papers: List[Dict[str, Any]],\n",
    "                                threshold: float) -> str:\n",
    "        \"\"\"\n",
    "        Format citations for rerank results above the threshold.\n",
    "\n",
    "        Args:\n",
    "            rerank_results: rerank API response ('results' entries carry\n",
    "                'index' and 'relevance_score')\n",
    "            papers: candidate papers, addressed by the response's 'index'\n",
    "            threshold: minimum relevance score\n",
    "\n",
    "        Returns:\n",
    "            concatenated citation string\n",
    "        \"\"\"\n",
    "        reference_info = \"\"\n",
    "        for item in rerank_results[\"results\"]:\n",
    "            if item[\"relevance_score\"] > threshold:\n",
    "                paper_index = item[\"index\"]\n",
    "                score = item[\"relevance_score\"]\n",
    "                if paper_index < len(papers):\n",
    "                    reference_info += self._format_reference(\n",
    "                        papers[paper_index]) + f\"<ss>{score}</ss>\"\n",
    "        return reference_info\n",
    "\n",
    "    async def process_drafts(self, parsed_draft_infos: List[Dict[str, Any]]) -> List[List[Tuple[Optional[int], Optional[str]]]]:\n",
    "        \"\"\"\n",
    "        Process several drafts concurrently.\n",
    "\n",
    "        Args:\n",
    "            parsed_draft_infos: parsed draft info dicts\n",
    "\n",
    "        Returns:\n",
    "            per-draft lists of process_hyde results, in input order\n",
    "        \"\"\"\n",
    "        all_tasks = []\n",
    "        for draft in parsed_draft_infos:\n",
    "            task_group = [\n",
    "                self.process_hyde(\n",
    "                    hyde, original_statement, evidence_index, retrieved_papers,\n",
    "                    keywords, draft[\"section\"]\n",
    "                )\n",
    "                for hyde, original_statement, evidence_index, retrieved_papers, keywords in zip(\n",
    "                    draft[\"hyde\"],\n",
    "                    draft[\"original_statement\"],\n",
    "                    draft[\"evidence_spans\"],\n",
    "                    draft[\"retrieved_papers\"],\n",
    "                    draft[\"keywords\"]\n",
    "                )\n",
    "            ]\n",
    "            all_tasks.extend(task_group)\n",
    "\n",
    "        results = await asyncio.gather(*all_tasks)\n",
    "\n",
    "        # Regroup the flat result list back into one list per draft\n",
    "        final_output = []\n",
    "        current_index = 0\n",
    "        for draft in parsed_draft_infos:\n",
    "            count = len(draft[\"hyde\"])\n",
    "            final_output.append(results[current_index:current_index + count])\n",
    "            current_index += count\n",
    "\n",
    "        return final_output\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 121,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the citation processor defined in the previous cell\n",
    "reference_processor = ReferenceProcessor()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke-test: resolve citations for the single statement selected earlier\n",
    "await reference_processor.process_hyde(hyde,original_statement,evidence_index,retrieved_papers,keywords, merge_section)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:__main__:开始处理 HYDE: The technological roadmap of multi-model large models is a critical research area shaping the future of artificial intelligence.\n",
      "INFO:__main__:开始处理 HYDE: This survey aims to assess the current state of the field, identify key challenges, and provide insights into future development paths.\n",
      "INFO:__main__:开始处理 HYDE: The context of the survey is rooted in the rapid advancements in machine learning, particularly in natural language processing (NLP) and computer vision, which have driven the creation of multi-model large models.\n",
      "INFO:__main__:开始处理 HYDE: These models are designed to integrate information from diverse sources such as text, images, and audio for complex tasks.\n",
      "INFO:__main__:开始处理 HYDE: The survey will draw upon a range of literature, including recent papers that have deepened our understanding of these models.\n",
      "INFO:__main__:开始处理 HYDE: The working memory module is a crucial element for improving the efficiency and generalization of Transformer-based reinforcement learning methods.\n",
      "INFO:__main__:开始处理 HYDE: It addresses the limitations of previous approaches that heavily rely on model size and inefficient data learning.\n",
      "INFO:__main__:开始处理 HYDE: This module enables models to store, integrate, and retrieve training information, leading to enhanced performance across diverse tasks.\n",
      "INFO:__main__:开始处理 HYDE: Drawing inspiration from cognitive science and neural network models with memory mechanisms, the working memory module can be implemented in various ways.\n",
      "INFO:__main__:开始处理 HYDE: Examples include Neural Turing Machines (NTMs) by Graves et al.\n",
      "INFO:__main__:开始处理 HYDE: Memory networks by Sukhbaatar et al.\n",
      "INFO:__main__:开始处理 HYDE: Goyal et al.\n",
      "INFO:__main__:开始处理 HYDE: Our approach utilizes LoRA (Low-Rank Adaptation) to enhance the working memory module, capitalizing on its established effectiveness in simple reinforcement learning settings and natural language processing tasks.\n",
      "INFO:__main__:开始处理 HYDE: Integrating a working memory module into Transformer-based models could offer valuable insights for revisiting earlier memory-augmentation methods, especially with the emergence of more powerful foundation models.\n",
      "INFO:__main__:开始处理 HYDE: This could potentially lead to more efficient and generalized reinforcement learning models.\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:__main__:尝试次数: 1\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # 7. Conclusions, limitations, and future work\n",
      "Tra...\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.57074744 0.4938121  0.46849129 0.57125291 0.56540449 0.49774678\n",
      " 0.51641046 0.4857258  0.45504805 0.49365705]\n",
      "INFO:__main__:计算的余弦分数: [0.57074744 0.4938121  0.46849129 0.57125291 0.56540449 0.49774678\n",
      " 0.51641046 0.4857258  0.45504805 0.49365705]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.4283789  0.36246407 0.36131412 0.41660649 0.38122209 0.39252575\n",
      " 0.36496495 0.38862661 0.40283638 0.38146228]\n",
      "INFO:__main__:计算的余弦分数: [0.4283789  0.36246407 0.36131412 0.41660649 0.38122209 0.39252575\n",
      " 0.36496495 0.38862661 0.40283638 0.38146228]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.62708851 0.55009703 0.6354253  0.61800397 0.56607525 0.52926214\n",
      " 0.60257279 0.61582544 0.60748904 0.57569331]\n",
      "INFO:__main__:计算的余弦分数: [0.62708851 0.55009703 0.6354253  0.61800397 0.56607525 0.52926214\n",
      " 0.60257279 0.61582544 0.60748904 0.57569331]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 0\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.5628774  0.47693741 0.54308276 0.50353496 0.55998472 0.54515436\n",
      " 0.4901492  0.51300066 0.5420287  0.49132406]\n",
      "INFO:__main__:计算的余弦分数: [0.5628774  0.47693741 0.54308276 0.50353496 0.55998472 0.54515436\n",
      " 0.4901492  0.51300066 0.5420287  0.49132406]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.50828778 0.48008691 0.51824007 0.53944551 0.52689016 0.45585887\n",
      " 0.52420195 0.47155531 0.52573195 0.50992854]\n",
      "INFO:__main__:计算的余弦分数: [0.50828778 0.48008691 0.51824007 0.53944551 0.52689016 0.45585887\n",
      " 0.52420195 0.47155531 0.52573195 0.50992854]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.42987102 0.4645887  0.48644021 0.49641587 0.44917732 0.43145283\n",
      " 0.43082161 0.45755745 0.45702148 0.3886961 ]\n",
      "INFO:__main__:计算的余弦分数: [0.42987102 0.4645887  0.48644021 0.49641587 0.44917732 0.43145283\n",
      " 0.43082161 0.45755745 0.45702148 0.3886961 ]\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 14, 缓存命中: 0, 新处理: 14, 失败: 0\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.56792703 0.50334781 0.5383165  0.45375352 0.47836549 0.4970826\n",
      " 0.50644651 0.48767178 0.55267291 0.51876074]\n",
      "INFO:__main__:计算的余弦分数: [0.56792703 0.50334781 0.5383165  0.45375352 0.47836549 0.4970826\n",
      " 0.50644651 0.48767178 0.55267291 0.51876074]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.61162933 0.68175836 0.63396126 0.5662643  0.53335718 0.56372141\n",
      " 0.59056272 0.62188251 0.57191199 0.58898896]\n",
      "INFO:__main__:计算的余弦分数: [0.61162933 0.68175836 0.63396126 0.5662643  0.53335718 0.56372141\n",
      " 0.59056272 0.62188251 0.57191199 0.58898896]\n",
      "INFO:__main__:开始重排序\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 1\n",
      "WARNING:__main__:失败的文本: ['# 7. Conclusions, limitations, and future work\\nTra...']\n",
      "INFO:__main__:第一次重排序结果: [22.25, 20.578125, 19.296875, 18.828125]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 3.55 秒\n",
      "INFO:__main__:第一次重排序结果: [20.328125, 18.09375, 17.296875, 17.140625, 17.046875, 16.75, 16.609375, 16.09375, 15.90625, 15.828125]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 3.55 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.53867458 0.55822313 0.59841678 0.55580466 0.55520306 0.5060529\n",
      " 0.46331051 0.49078055 0.47439448 0.49311715 0.50141765 0.52303235\n",
      " 0.51998725 0.51608196]\n",
      "INFO:__main__:计算的余弦分数: [0.53867458 0.55822313 0.59841678 0.55580466 0.55520306 0.5060529\n",
      " 0.46331051 0.49078055 0.47439448 0.49311715 0.50141765 0.52303235\n",
      " 0.51998725 0.51608196]\n",
      "INFO:__main__:开始重排序\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # 3.3PROPOSEDAPPROACH\n",
      "We hypothesize that error se...\n",
      "INFO:__main__:向量生成完成，总数: 10, 缓存命中: 0, 新处理: 10, 失败: 1\n",
      "WARNING:__main__:失败的文本: ['# 3.3PROPOSEDAPPROACH\\nWe hypothesize that error se...']\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # Bibliography\n",
      "C. Abraham, P. A. Cornillon, E. Mat...\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # 4 RELATED WORK\n",
      "Neural temporal point processes. ...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # 3.3PROPOSEDAPPROACH\n",
      "We hypothesize that error se...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.47264606 0.44200895 0.50605374 0.44488974 0.42677197 0.53686537\n",
      " 0.41076088 0.42425459 0.         0.50341732]\n",
      "INFO:__main__:计算的余弦分数: [0.47264606 0.44200895 0.50605374 0.44488974 0.42677197 0.53686537\n",
      " 0.41076088 0.42425459 0.         0.50341732]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.52706096 0.50206372 0.50226098 0.         0.41724326 0.54199399\n",
      " 0.47854969 0.48663941 0.50113681 0.46361418]\n",
      "INFO:__main__:计算的余弦分数: [0.52706096 0.50206372 0.50226098 0.         0.41724326 0.54199399\n",
      " 0.47854969 0.48663941 0.50113681 0.46361418]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:向量生成完成，总数: 18, 缓存命中: 0, 新处理: 18, 失败: 0\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 12, 缓存命中: 0, 新处理: 12, 失败: 1\n",
      "WARNING:__main__:失败的文本: ['# 3.3PROPOSEDAPPROACH\\nWe hypothesize that error se...']\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.71887226 0.68620088 0.58892475 0.67970874 0.54101735 0.56239028\n",
      " 0.64355693 0.6114435  0.57973617 0.65247568 0.         0.55742942]\n",
      "INFO:__main__:计算的余弦分数: [0.71887226 0.68620088 0.58892475 0.67970874 0.54101735 0.56239028\n",
      " 0.64355693 0.6114435  0.57973617 0.65247568 0.         0.55742942]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:第一次重排序结果: [18.96875, 17.515625, 17.234375, 15.4765625, 14.9296875, 12.6640625, 12.3984375]\n",
      "WARNING:__main__:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.49986381 0.4870724  0.52608721 0.53549561 0.48169182 0.43861536\n",
      " 0.56062032 0.60219688 0.53158594 0.59639553 0.55264788 0.52185556\n",
      " 0.54744687 0.55942772 0.55132001 0.55417561 0.56834839 0.57994538]\n",
      "INFO:__main__:计算的余弦分数: [0.49986381 0.4870724  0.52608721 0.53549561 0.48169182 0.43861536\n",
      " 0.56062032 0.60219688 0.53158594 0.59639553 0.55264788 0.52185556\n",
      " 0.54744687 0.55942772 0.55132001 0.55417561 0.56834839 0.57994538]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:第一次重排序结果: [19.84375, 19.015625, 15.8828125, 15.2890625, 14.7109375, 11.5234375]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 4.54 秒\n",
      "INFO:__main__:第一次重排序结果: [22.5625, 21.40625, 21.1875, 21.15625, 20.34375, 20.265625, 19.984375, 18.734375, 18.65625, 18.359375]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 4.53 秒\n",
      "INFO:__main__:第一次重排序结果: [21.875, 18.09375, 17.828125]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 4.54 秒\n",
      "INFO:__main__:第一次重排序结果: [20.59375, 18.34375, 18.3125, 18.1875, 17.015625, 13.921875, 13.671875, 13.640625, 13.171875, 11.09375]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 4.55 秒\n",
      "INFO:__main__:第一次重排序结果: [17.1875, 16.921875, 16.890625, 16.78125, 16.25, 16.25, 15.5078125]\n",
      "WARNING:__main__:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 18, 缓存命中: 0, 新处理: 18, 失败: 1\n",
      "WARNING:__main__:失败的文本: ['# 4 RELATED WORK\\nNeural temporal point processes. ...']\n",
      "INFO:__main__:向量生成完成，总数: 20, 缓存命中: 0, 新处理: 20, 失败: 1\n",
      "WARNING:__main__:失败的文本: ['# Bibliography\\nC. Abraham, P. A. Cornillon, E. Mat...']\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.68484025 0.49520993 0.53402506 0.51949856 0.61650476 0.58060661\n",
      " 0.         0.48954466 0.48302981 0.58001661 0.4845559  0.64391172\n",
      " 0.53448824 0.511099   0.58270777 0.62928483 0.62895714 0.5156075 ]\n",
      "INFO:__main__:计算的余弦分数: [0.68484025 0.49520993 0.53402506 0.51949856 0.61650476 0.58060661\n",
      " 0.         0.48954466 0.48302981 0.58001661 0.4845559  0.64391172\n",
      " 0.53448824 0.511099   0.58270777 0.62928483 0.62895714 0.5156075 ]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.2194976  0.26952326 0.38871439 0.32202461 0.25202465 0.39969267\n",
      " 0.35466867 0.27357202 0.21807072 0.         0.59989652 0.47923465\n",
      " 0.49782184 0.45222982 0.477702   0.4773127  0.48680946 0.49848782\n",
      " 0.49219538 0.45534522]\n",
      "INFO:__main__:计算的余弦分数: [0.2194976  0.26952326 0.38871439 0.32202461 0.25202465 0.39969267\n",
      " 0.35466867 0.27357202 0.21807072 0.         0.59989652 0.47923465\n",
      " 0.49782184 0.45222982 0.477702   0.4773127  0.48680946 0.49848782\n",
      " 0.49219538 0.45534522]\n",
      "INFO:__main__:开始重排序\n",
      "INFO:__main__:第一次重排序结果: [20.6875]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 5.32 秒\n",
      "INFO:__main__:第一次重排序结果: [17.125, 15.015625, 13.8828125, 13.703125, 12.765625]\n",
      "WARNING:__main__:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:__main__:第一次重排序结果: [20.875, 20.53125, 20.15625, 19.734375, 19.296875, 18.484375, 18.265625, 18.09375, 17.375, 16.28125, 15.90625]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 5.88 秒\n",
      "INFO:__main__:第一次重排序结果: [19.921875, 19.609375, 18.953125, 18.609375, 18.328125, 17.046875, 16.46875, 15.6640625, 14.890625, 14.3984375, 13.484375, 12.71875, 12.09375]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 5.89 秒\n",
      "INFO:__main__:第一次重排序结果: [19.171875, 17.390625, 17.015625, 16.109375, 15.953125, 15.7734375, 15.484375, 15.1640625, 14.7578125, 14.390625, 14.09375, 13.109375, 11.0546875, 10.5703125]\n",
      "INFO:__main__:第一次重排序成功\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 6.17 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:__main__:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # Conclusion\n",
      "In this paper, we propose a generally...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # Impact Statements\n",
      "This paper presents work whose...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 28, 缓存命中: 5, 新处理: 23, 失败: 0\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.43157454 0.40676314 0.29675821 0.37532651 0.44986858 0.37723414\n",
      " 0.42267692 0.49200134 0.439022   0.37556143 0.31200593 0.35235644\n",
      " 0.37541929 0.37483588 0.39088131 0.37062433 0.26109237 0.39659095\n",
      " 0.32707917 0.30836836 0.37155655 0.36544477 0.39915368 0.38391944\n",
      " 0.34024043 0.37861356 0.43444721 0.39991556]\n",
      "INFO:__main__:计算的余弦分数: [0.43157454 0.40676314 0.29675821 0.37532651 0.44986858 0.37723414\n",
      " 0.42267692 0.49200134 0.439022   0.37556143 0.31200593 0.35235644\n",
      " 0.37541929 0.37483588 0.39088131 0.37062433 0.26109237 0.39659095\n",
      " 0.32707917 0.30836836 0.37155655 0.36544477 0.39915368 0.38391944\n",
      " 0.34024043 0.37861356 0.43444721 0.39991556]\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 12.35 秒\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 45, 缓存命中: 2, 新处理: 43, 失败: 3\n",
      "WARNING:__main__:失败的文本: ['# Conclusion\\nIn this paper, we propose a generally...', '# Impact Statements\\nThis paper presents work whose...', '# 7. Conclusions, limitations, and future work\\nTra...']\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.44040847 0.42694476 0.44505407 0.40239924 0.42857454 0.38775103\n",
      " 0.39536046 0.37719911 0.44415014 0.3386558  0.43810456 0.38704178\n",
      " 0.34464522 0.32361304 0.39745815 0.37810142 0.39248458 0.43531106\n",
      " 0.4123325  0.40893341 0.39775532 0.35531559 0.41846193 0.41532431\n",
      " 0.46127026 0.         0.39014104 0.36780062 0.3826074  0.45327391\n",
      " 0.36289821 0.42584729 0.37572245 0.38371506 0.         0.44490456\n",
      " 0.41876186 0.44211992 0.34815702 0.45633978 0.38518767 0.\n",
      " 0.44432902 0.40562141 0.35446803]\n",
      "INFO:__main__:计算的余弦分数: [0.44040847 0.42694476 0.44505407 0.40239924 0.42857454 0.38775103\n",
      " 0.39536046 0.37719911 0.44415014 0.3386558  0.43810456 0.38704178\n",
      " 0.34464522 0.32361304 0.39745815 0.37810142 0.39248458 0.43531106\n",
      " 0.4123325  0.40893341 0.39775532 0.35531559 0.41846193 0.41532431\n",
      " 0.46127026 0.         0.39014104 0.36780062 0.3826074  0.45327391\n",
      " 0.36289821 0.42584729 0.37572245 0.38371506 0.         0.44490456\n",
      " 0.41876186 0.44211992 0.34815702 0.45633978 0.38518767 0.\n",
      " 0.44432902 0.40562141 0.35446803]\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 12.60 秒\n",
      "INFO:__main__:第二次重排序检索到的论文数量: 33\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "尝试次数: %d 2\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:__main__:第二次重排序\n",
      "INFO:__main__:第二次重排序检索到的论文数量: 35\n",
      "INFO:__main__:第二次重排序\n",
      "INFO:__main__:第二次重排序结果: [16.15625, 15.3984375, 15.15625, 14.6328125, 14.125, 13.390625, 13.359375, 12.3984375, 17.109375, 15.2421875, 14.9765625, 14.9296875, 14.828125, 14.0234375, 13.78125, 13.5078125, 18.65625, 16.734375, 15.8515625, 15.5546875, 15.1640625, 14.9921875, 13.484375, 13.390625, 16.4375, 16.25, 15.71875, 15.4765625, 15.40625, 15.3828125, 15.2890625, 14.9921875, 15.6875]\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 13.85 秒\n",
      "INFO:__main__:第二次重排序结果: [17.875, 17.734375, 16.609375, 15.8984375, 15.4765625, 15.25, 12.984375, 12.7890625, 18.4375, 15.359375, 15.296875, 14.15625, 13.28125, 12.625, 12.3671875, 12.296875, 16.09375, 15.625, 13.7109375, 13.5390625, 12.78125, 12.265625, 12.03125, 11.7578125, 16.578125, 15.4765625, 15.2421875, 14.75, 13.9453125, 13.0, 12.484375, 10.8125, 15.125, 14.9609375, 13.328125]\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 14.17 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:__main__:第二次重排序检索到的论文数量: 29\n",
      "INFO:__main__:第二次重排序\n",
      "INFO:__main__:第二次重排序结果: [17.21875, 16.703125, 16.671875, 16.328125, 15.9140625, 15.0, 14.6953125, 12.6484375, 17.8125, 17.265625, 16.515625, 16.390625, 16.28125, 14.9921875, 14.0625, 13.34375, 17.90625, 16.953125, 16.609375, 15.6640625, 15.484375, 15.46875, 15.2890625, 13.625, 16.296875, 15.28125, 13.671875, 13.078125, 12.0625]\n",
      "INFO:__main__:处理 HYDE 完成，耗时: 16.63 秒\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[[(0,\n",
       "   '\\n## 1 Introduction\\n\\nThe technological roadmap of multi-model large models is a critical research area shaping the future of artificial intelligence.<sup>Structural Information Guided Multimodal Pre-training for Vehicle-centric Perception AAAI,2024, chunk 1</sup><ss>20.59375</ss>'),\n",
       "  (1,\n",
       "   'This survey aims to assess the current state of the field, identify key challenges, and provide insights into future development paths.'),\n",
       "  (2,\n",
       "   'The context of the survey is rooted in the rapid advancements in machine learning, particularly in natural language processing (NLP) and computer vision, which have driven the creation of multi-model large models.'),\n",
       "  (3,\n",
       "   'These models are designed to integrate information from diverse sources such as text, images, and audio for complex tasks.'),\n",
       "  (4,\n",
       "   'The survey will draw upon a range of literature, including recent papers that have deepened our understanding of these models.')],\n",
       " [(0,\n",
       "   '\\n### 2.1 Working Memory Module\\n\\nThe working memory module is a crucial element for improving the efficiency and generalization of Transformer-based reinforcement learning methods.<sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 0</sup><ss>20.875</ss><sup>Layer-wise Fusion with Modality Independence Modeling for Multi-modal Emotion Recognition. ACL ,2023, chunk 4</sup><ss>20.53125</ss><sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 3</sup><ss>20.15625</ss><sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 1</sup><ss>19.734375</ss><sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 2</sup><ss>19.296875</ss>'),\n",
       "  (1,\n",
       "   'It addresses the limitations of previous approaches that heavily rely on model size and inefficient data learning.'),\n",
       "  (2,\n",
       "   'This module enables models to store, integrate, and retrieve training information, leading to enhanced performance across diverse tasks.<sup>Layer-wise Fusion with Modality Independence Modeling for Multi-modal Emotion Recognition. ACL ,2023, chunk 4</sup><ss>19.84375</ss><sup>Mplug-Owl2: Revolutionizing Multi-modal Large Language Model with Modality Collaboration CVPR,2024, chunk 4</sup><ss>19.015625</ss>'),\n",
       "  (3,\n",
       "   'Drawing inspiration from cognitive science and neural network models with memory mechanisms, the working memory module can be implemented in various ways.<sup>A Framework for Inference Inspired by Human Memory Mechanisms ICLR,2024, chunk 0</sup><ss>21.875</ss>'),\n",
       "  (5,\n",
       "   '[16], which combine Turing machine and deep learning concepts to incorporate a content-addressable matrix memory space.<sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 2</sup><ss>19.921875</ss><sup>Token Turing Machines CVPR ,2023, chunk 0</sup><ss>19.609375</ss>'),\n",
       "  (7,\n",
       "   '[32] also adopt a similar approach to facilitate complex reasoning and inference tasks.<sup>Improving Transformers with Probabilistic Attention Keys ICML  International Conference on Machine Learning,2022, chunk 6</sup><ss>19.171875</ss>'),\n",
       "  (9,\n",
       "   '[15] propose a shared global workspace method that fosters information sharing among neural modules, akin to working memory.<sup>Scalable Geometric Fracture Assembly Via Co-creation Space among Assemblers AAAI,2024, chunk 1</sup><ss>20.6875</ss>'),\n",
       "  (10,\n",
       "   'Our approach utilizes LoRA (Low-Rank Adaptation) to enhance the working memory module, capitalizing on its established effectiveness in simple reinforcement learning settings and natural language processing tasks.<sup>LQ-LoRA: Low-rank Plus Quantized Matrix Decomposition for Efficient Language Model Finetuning. ICLR,2024, chunk 0</sup><ss>22.5625</ss><sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 3</sup><ss>21.40625</ss><sup>The Expressive Power of Low-Rank Adaptation. ICLR,2024, chunk 0</sup><ss>21.1875</ss><sup>LORS: Low-rank Residual Structure for Parameter-Efficient Network Stacking CVPR,2024, chunk 1</sup><ss>21.15625</ss><sup>LQ-LoRA: Low-rank Plus Quantized Matrix Decomposition for Efficient Language Model Finetuning. ICLR,2024, chunk 1</sup><ss>20.34375</ss><sup>MELO: Enhancing Model Editing with Neuron-Indexed Dynamic LoRA AAAI,2024, chunk 2</sup><ss>20.265625</ss><sup>Task-Agnostic Low-Rank Adapters for Unseen English Dialects EMNLP ,2023, chunk 2</sup><ss>19.984375</ss>'),\n",
       "  (11,\n",
       "   'Integrating a working memory module into Transformer-based models could offer valuable insights for revisiting earlier memory-augmentation methods, especially with the emergence of more powerful foundation models.<sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 2</sup><ss>22.25</ss><sup>STanHop: Sparse Tandem Hopfield Model for Memory-Enhanced Time Series Prediction ICLR,2024, chunk 9</sup><ss>20.578125</ss><sup>Think Before You Act: Decision Transformers with Internal Working Memory. ICML,2024, chunk 0</sup><ss>19.296875</ss>'),\n",
       "  (12,\n",
       "   'This could potentially lead to more efficient and generalized reinforcement learning models.<sup>Constrained Meta-Reinforcement Learning for Adaptable Safety Guarantee with Differentiable Convex Programming AAAI,2024, chunk 5</sup><ss>20.328125</ss>')]]"
      ]
     },
     "execution_count": 119,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Run the full citation pipeline over every parsed draft section;\n",
    "# the rendered output above is the annotated sentence list with <sup>/<ss> markup.\n",
    "await reference_processor.process_drafts(pipeline.parsed_draft_infos)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:__main__:开始处理 HYDE: The zero-shot evaluation paradigm, which typically focuses on a single aspect or person in an image, can lead to mismatches between training and evaluation scenarios.\n"
     ]
    }
   ],
   "source": [
    "# Set up for HYDE processing: tokenize the merged section off the event\n",
    "# loop and initialise the accumulators used by the retry logic below.\n",
    "loop = asyncio.get_running_loop()\n",
    "logger.info(\"开始处理 HYDE: %s\", hyde)\n",
    "start_time = time.time()\n",
    "\n",
    "# sent_tokenize is CPU-bound; run it in the default executor so the event\n",
    "# loop is not blocked. `merge_section` is presumably the section text\n",
    "# assembled in an earlier cell -- TODO confirm.\n",
    "sentences = await loop.run_in_executor(None, sent_tokenize, merge_section)\n",
    "failed_papers_id = set()  # ids of papers whose rerank failed; excluded on retries\n",
    "reference_info = \"\"  # accumulated citation markup produced downstream\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "ERROR:__main__:批处理生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "WARNING:__main__:生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n",
      "ERROR:__main__:生成向量最终失败，存储零向量到缓存: # 5.3 Generalized Zero-shot Learning\n",
      "Zero-shot lea...\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:向量生成完成，总数: 16, 缓存命中: 0, 新处理: 16, 失败: 1\n",
      "WARNING:__main__:失败的文本: ['# 5.3 Generalized Zero-shot Learning\\nZero-shot lea...']\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:__main__:计算得到的余弦相似度分数: [0.61280962 0.60437753 0.         0.53433372 0.57368384 0.53970681\n",
      " 0.54408441 0.6052871  0.51955671 0.56328744 0.57955248 0.61548755\n",
      " 0.5953293  0.56974305 0.60411266 0.52085299]\n"
     ]
    }
   ],
   "source": [
    "# Thresholds used by this exploration:\n",
    "#   cos_threshold    -- minimum cosine similarity for a paper to enter rerank\n",
    "#   rerank_threshold -- minimum rerank relevance_score; defined here but only\n",
    "#                       consumed by the rerank cells further down\n",
    "cos_threshold = 0.5\n",
    "rerank_threshold = 19\n",
    "\n",
    "\n",
    "# Pull the chunk texts for the retrieved papers, then score each chunk\n",
    "# against the original statement with cosine similarity.\n",
    "papers_content = await reference_processor._process_papers_content(retrieved_papers)\n",
    "\n",
    "cos_scores = await reference_processor.embedding_model.get_cos_scores(original_statement, papers_content)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unguarded variant of the rerank-candidate selection (a guarded version\n",
    "# wrapped in a cos-score check appears in the next cell).\n",
    "logger.info(\"开始重排序\")\n",
    "# Papers whose cosine score cleared the threshold become rerank candidates.\n",
    "rerank_papers = [y for x, y in enumerate(\n",
    "retrieved_papers) if cos_scores[x] > cos_threshold]\n",
    "rerank_papers_content = [\n",
    "        x[\"entity\"][\"chunk_text\"] for x in rerank_papers]\n",
    "# Trailing top-level expression: displays the candidate list as cell output.\n",
    "rerank_papers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rerank only when at least one candidate clears the cosine threshold;\n",
    "# default `rerank_results` to None so downstream cells can tell whether a\n",
    "# rerank actually happened (the original left it undefined in that case).\n",
    "rerank_results = None\n",
    "if any(score > cos_threshold for score in cos_scores):\n",
    "    logger.info(\"开始重排序\")\n",
    "    # Keep only the papers whose cosine score passed the threshold.\n",
    "    rerank_papers = [paper for idx, paper in enumerate(retrieved_papers)\n",
    "                     if cos_scores[idx] > cos_threshold]\n",
    "    rerank_papers_content = [paper[\"entity\"][\"chunk_text\"] for paper in rerank_papers]\n",
    "    rerank_results = await reference_processor.embedding_model.rerank_documents(\n",
    "        query=original_statement, documents=rerank_papers_content)\n",
    "# Top-level trailing expression so Jupyter actually renders the result\n",
    "# (the original had it inside the `if`, where it is never displayed).\n",
    "rerank_results"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1. 向量cos计算\n",
    "2. 大于0.5\n",
    "    2.1 rerank\n",
    "        rerank>19：产生结果\n",
    "        rerank<19：\n",
    "            new_hyde+检索\n",
    "            rerank>19：产生结果\n",
    "            rerank<19：产生结果（无引用）\n",
    "3. 小于0.5\n",
    "    3.1 TODO：补充低相似度分支的处理流程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell is a fragment copied out of a retry loop inside\n",
    "# the pipeline (note the leading indentation, the `break` statements and the\n",
    "# references to `self` / `attempt`); kept for reference, not runnable as-is.\n",
    "\n",
    "    logger.info(\"尝试次数: %d\", attempt + 1)\n",
    "    papers_content = await self._process_papers_content(retrieved_papers)\n",
    "\n",
    "    cos_scores = await self.embedding_model.get_cos_scores(original_statements, papers_content)\n",
    "    logger.info(\"计算的余弦分数: %s\", cos_scores)\n",
    "\n",
    "    if any(score > cos_threshold for score in cos_scores):\n",
    "        try:\n",
    "            logger.info(\"开始重排序\")\n",
    "            # Papers whose cosine score cleared the threshold become candidates.\n",
    "            rerank_papers = [y for x, y in enumerate(\n",
    "                retrieved_papers) if cos_scores[x] > cos_threshold]\n",
    "            rerank_papers_content = [\n",
    "                x[\"entity\"][\"chunk_text\"] for x in rerank_papers]\n",
    "            rerank_results = await self.embedding_model.rerank_documents(\n",
    "                query=original_statements, documents=rerank_papers_content)\n",
    "            if rerank_results and any(result[\"relevance_score\"] > rerank_threshold for result in rerank_results[\"results\"]):\n",
    "                logger.info(\"第一次重排序结果: %s\", [\n",
    "                            result[\"relevance_score\"] for result in rerank_results[\"results\"]])\n",
    "                reference_info = self._process_rerank_results(\n",
    "                    rerank_results, rerank_papers, rerank_threshold)\n",
    "                logger.info(\"第一次重排序成功\")\n",
    "                break\n",
    "            if all(result[\"relevance_score\"] < rerank_threshold for result in rerank_results[\"results\"]):\n",
    "                logger.info(\"第一次重排序结果: %s\", [\n",
    "                            result[\"relevance_score\"] for result in rerank_results[\"results\"]])\n",
    "                # BUG FIX: failed_papers_id is a set (see the cell that\n",
    "                # initialises it); `set += list` raises TypeError, so use\n",
    "                # set.update() instead of `+=`.\n",
    "                failed_papers_id.update(r[\"id\"] for r in retrieved_papers)\n",
    "                logger.warning(\n",
    "                    \"第一次重排序分数均小于 %f，重新生成 HYDE。\", rerank_threshold)\n",
    "                new_hyde, core_questions = await self.gene_statement_hyde.generate_statement_hyde(statements=hyde, keywords=keywords)\n",
    "                rerank_retrieved_paper_tasks = [self.query.query_by_content(\n",
    "                    q, top_k=10) for q in [new_hyde] + core_questions]\n",
    "                rerank_retrieved_paper_list = await asyncio.gather(*rerank_retrieved_paper_tasks)\n",
    "\n",
    "                unique_ids = set()\n",
    "                rerank_retrieved_paper_set_list = []\n",
    "                # Walk the nested result lists and deduplicate by paper id.\n",
    "                for sublist in rerank_retrieved_paper_list:\n",
    "                    for item in sublist:\n",
    "                        if item[\"id\"] not in unique_ids:\n",
    "                            unique_ids.add(item[\"id\"])  # remember the id\n",
    "                            rerank_retrieved_paper_set_list.append(\n",
    "                                item)  # keep first occurrence only\n",
    "                rerank_retrieved_paper_list = rerank_retrieved_paper_set_list\n",
    "                rerank_retrieved_paper_list = [\n",
    "                    paper for paper in rerank_retrieved_paper_list if paper[\"id\"] not in failed_papers_id]\n",
    "                logger.info(\n",
    "                    f\"第二次重排序检索到的论文数量: {len(rerank_retrieved_paper_list)}\")\n",
    "                rerank_papers_content = [\n",
    "                    x[\"entity\"][\"chunk_text\"] for x in rerank_retrieved_paper_list]\n",
    "                rerank_papers_content = await loop.run_in_executor(None, chunking, rerank_papers_content)\n",
    "                logger.info(\"第二次重排序\")\n",
    "                reference_info = await self._handle_rerank_failure(\n",
    "                    statement=original_statements, rerank_papers_content=rerank_papers_content, rerank_papers=rerank_retrieved_paper_list, rerank_threshold=rerank_threshold)\n",
    "                break\n",
    "        except Exception as e:\n",
    "            logger.error(\"重排序失败: %s\", e)\n",
    "            reference_info = await self._handle_rerank_failure(\n",
    "                hyde, papers_content, retrieved_papers, rerank_threshold)\n",
    "            break\n",
    "\n",
    "    # Similarity too low everywhere: regenerate the HYDE and retry.\n",
    "    # BUG FIX: sets have no .extend(); use .update() to add the ids.\n",
    "    failed_papers_id.update(paper[\"id\"] for paper in retrieved_papers)\n",
    "    new_hyde, core_questions = await self.gene_statement_hyde.generate_statement_hyde(\n",
    "        statements=hyde, keywords=keywords)\n",
    "\n",
    "    # Retrieve a fresh batch of papers, excluding previously failed ids.\n",
    "    retrieved_papers = await self._retrieve_new_papers(\n",
    "        new_hyde, core_questions, failed_papers_id)\n",
    "    hyde = new_hyde\n",
    "\n",
    "logger.info(\"处理 HYDE 完成，耗时: %.2f 秒\", time.time() - start_time)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ReferenceProcessor:\n",
    "    def __init__(self):\n",
    "        self.embedding_model = EmbeddingModel_speed()\n",
    "        self.gene_statement_hyde = GenStatementHyde()\n",
    "        self.query = Query()\n",
    "        # 设置阈值\n",
    "        self.cos_threshold = 0.5\n",
    "        self.rerank_threshold = 19\n",
    "        # 设置并发控制\n",
    "        self.semaphore = asyncio.Semaphore(20)\n",
    "\n",
    "    async def process_hyde_optimized(self, hyde: str, original_statement: str,\n",
    "                                   evidence_index: List[int], retrieved_papers: List[Dict[str, Any]],\n",
    "                                   keywords: str) -> Tuple[Optional[int], Optional[str]]:\n",
    "        \"\"\"优化后的HYDE处理函数\"\"\"\n",
    "        start_time = time.time()\n",
    "        logger.info(f\"开始处理HYDE: {hyde[:50]}...\")\n",
    "        \n",
    "        failed_papers_id = set()  # 使用集合提高性能\n",
    "        \n",
    "        async def process_attempt(current_hyde: str, papers: List[Dict[str, Any]]) -> Optional[str]:\n",
    "            \"\"\"处理单次尝试\"\"\"\n",
    "            # 并行获取papers_content和计算余弦相似度\n",
    "            papers_content = [p[\"entity\"][\"chunk_text\"] for p in papers]\n",
    "            papers_content = await loop.run_in_executor(None, chunking, papers_content)\n",
    "            cos_scores = await self.embedding_model.get_cos_scores(original_statement, papers_content)\n",
    "            logger.info(f\"计算的余弦分数: {cos_scores}\")\n",
    "            \n",
    "            if any(score > self.cos_threshold for score in cos_scores):\n",
    "                try:\n",
    "                    # 筛选高分论文并并行处理重排序\n",
    "                    high_score_indices = [i for i, score in enumerate(cos_scores) if score > self.cos_threshold]\n",
    "                    rerank_papers = [papers[i] for i in high_score_indices]\n",
    "                    rerank_content = [papers_content[i] for i in high_score_indices]\n",
    "                    \n",
    "                    # 批量重排序\n",
    "                    rerank_results = await self._batch_rerank(original_statement, rerank_content, rerank_papers)\n",
    "                    \n",
    "                    if rerank_results and any(result[\"relevance_score\"] > self.rerank_threshold \n",
    "                                            for result in rerank_results[\"results\"]):\n",
    "                        logger.info(\"重排序成功\")\n",
    "                        return self._format_reference_info(rerank_results, rerank_papers)\n",
    "                    \n",
    "                    # 所有分数都低于阈值，需要重新生成HYDE\n",
    "                    failed_papers_id.update(p[\"id\"] for p in papers)\n",
    "                    return await self._generate_and_process_new_hyde(current_hyde, keywords, failed_papers_id)\n",
    "                    \n",
    "                except Exception as e:\n",
    "                    logger.error(f\"重排序失败: {e}\")\n",
    "                    return await self._handle_rerank_failure(original_statement, papers_content, papers)\n",
    "            \n",
    "            # 相似度不够，更新失败ID并重新生成HYDE\n",
    "            failed_papers_id.update(p[\"id\"] for p in papers)\n",
    "            return await self._generate_and_process_new_hyde(current_hyde, keywords, failed_papers_id)\n",
    "\n",
    "        # 最多尝试3次\n",
    "        reference_info = None\n",
    "        for attempt in range(3):\n",
    "            logger.info(f\"第 {attempt + 1} 次尝试\")\n",
    "            reference_info = await process_attempt(hyde, retrieved_papers)\n",
    "            if reference_info:\n",
    "                break\n",
    "        \n",
    "        logger.info(f\"HYDE处理完成，耗时: {time.time() - start_time:.2f}秒\")\n",
    "        return evidence_index[0] if evidence_index else None, reference_info\n",
    "\n",
    "    async def _batch_rerank(self, query: str, documents: List[str], papers: List[Dict[str, Any]], \n",
    "                          batch_size: int = 8) -> Dict[str, Any]:\n",
    "        \"\"\"批量处理重排序请求\"\"\"\n",
    "        results = []\n",
    "        for i in range(0, len(documents), batch_size):\n",
    "            batch_docs = documents[i:i + batch_size]\n",
    "            batch_papers = papers[i:i + batch_size]\n",
    "            \n",
    "            async with self.semaphore:\n",
    "                result = await self.embedding_model.rerank_documents(\n",
    "                    query=query,\n",
    "                    documents=batch_docs\n",
    "                )\n",
    "                if result:\n",
    "                    results.extend(result[\"results\"])\n",
    "        \n",
    "        return {\"results\": results}\n",
    "\n",
    "    async def _generate_and_process_new_hyde(self, hyde: str, keywords: str, \n",
    "                                           failed_ids: Set[str]) -> Optional[str]:\n",
    "        \"\"\"生成新的HYDE并处理\"\"\"\n",
    "        try:\n",
    "            # 并行执行HYDE生成和查询\n",
    "            new_hyde, core_questions = await self.gene_statement_hyde.generate_statement_hyde(\n",
    "                statements=hyde, keywords=keywords\n",
    "            )\n",
    "            \n",
    "            # 并行执行所有查询\n",
    "            query_tasks = [\n",
    "                self.query.query_by_content(q, top_k=10)\n",
    "                for q in [new_hyde] + core_questions\n",
    "            ]\n",
    "            results = await asyncio.gather(*query_tasks)\n",
    "            \n",
    "            # 去重和过滤\n",
    "            unique_papers = await self._process_query_results(results, failed_ids)\n",
    "            \n",
    "            if not unique_papers:\n",
    "                return \"\"\n",
    "\n",
    "            # 处理新的论文集\n",
    "            papers_content = [p[\"entity\"][\"chunk_text\"] for p in unique_papers]\n",
    "            chunked_content = await asyncio.get_running_loop().run_in_executor(None, chunking, papers_content)\n",
    "            \n",
    "            # 重新排序\n",
    "            rerank_result = await self._batch_rerank(hyde, chunked_content, unique_papers)\n",
    "            return self._format_reference_info(rerank_result, unique_papers)\n",
    "            \n",
    "        except Exception as e:\n",
    "            logger.error(f\"生成新HYDE失败: {e}\")\n",
    "            return \"\"\n",
    "\n",
    "    async def _process_query_results(self, results: List[List[Dict]], \n",
    "                                   failed_ids: Set[str]) -> List[Dict]:\n",
    "        \"\"\"处理查询结果，去重和过滤\"\"\"\n",
    "        unique_papers = {}\n",
    "        for result_list in results:\n",
    "            for paper in result_list:\n",
    "                if paper[\"id\"] not in failed_ids and paper[\"id\"] not in unique_papers:\n",
    "                    unique_papers[paper[\"id\"]] = paper\n",
    "        return list(unique_papers.values())\n",
    "\n",
    "    def _format_reference_info(self, rerank_results: Dict[str, Any], \n",
    "                             papers: List[Dict[str, Any]]) -> str:\n",
    "        \"\"\"格式化引用信息\"\"\"\n",
    "        reference_info = []\n",
    "        for result, paper in zip(rerank_results[\"results\"], papers):\n",
    "            if result[\"relevance_score\"] > self.rerank_threshold:\n",
    "                reference_info.append(\n",
    "                    f\"<sup>{re.sub(r'<sup>.*?</sup>', '', paper['entity']['paper_title'])}.\"\n",
    "                    f\"{paper['entity']['original_filename']},\"\n",
    "                    f\"{paper['entity']['year']}, \"\n",
    "                    f\"chunk {paper['entity']['chunk_id']}</sup>\"\n",
    "                    f\"<ss>{result['relevance_score']}</ss>\"\n",
    "                )\n",
    "        return \"\".join(reference_info)\n",
    "\n",
    "    async def _handle_rerank_failure(self, statement: str, papers_content: List[str],\n",
    "                                   papers: List[Dict[str, Any]]) -> str:\n",
    "        \"\"\"处理重排序失败的情况\"\"\"\n",
    "        try:\n",
    "            # 批量处理重排序\n",
    "            rerank_result = await self._batch_rerank(statement, papers_content, papers)\n",
    "            return self._format_reference_info(rerank_result, papers)\n",
    "        except Exception as e:\n",
    "            logger.error(f\"处理重排序失败时发生错误: {e}\")\n",
    "            return \"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:asyncio:第一次重排序结果: [14.0390625]\n",
      "WARNING:asyncio:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:asyncio:第二次重排序检索到的论文数量: 27\n"
     ]
    },
    {
     "ename": "AttributeError",
     "evalue": "'function' object has no attribute 'run_in_executor'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[112], line 43\u001b[0m\n\u001b[0;32m     39\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\n\u001b[0;32m     40\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m第二次重排序检索到的论文数量: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(rerank_retrieved_paper_list)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m     41\u001b[0m rerank_papers_content \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m     42\u001b[0m                     x[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mentity\u001b[39m\u001b[38;5;124m\"\u001b[39m][\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mchunk_text\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m rerank_retrieved_paper_list]\n\u001b[1;32m---> 43\u001b[0m rerank_papers_content \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m loop\u001b[38;5;241m.\u001b[39mrun_in_executor(\u001b[38;5;28;01mNone\u001b[39;00m, chunking, rerank_papers_content)\n\u001b[0;32m     44\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m第二次重排序\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m     45\u001b[0m reference_info \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m pipeline\u001b[38;5;241m.\u001b[39mprocessor\u001b[38;5;241m.\u001b[39m_handle_rerank_failure(\n\u001b[0;32m     46\u001b[0m                     statement\u001b[38;5;241m=\u001b[39moriginal_statement, rerank_papers_content\u001b[38;5;241m=\u001b[39mrerank_papers_content, rerank_papers\u001b[38;5;241m=\u001b[39mrerank_retrieved_paper_list, rerank_threshold\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m19\u001b[39m)\n",
      "\u001b[1;31mAttributeError\u001b[0m: 'function' object has no attribute 'run_in_executor'"
     ]
    }
   ],
   "source": [
    "# Standalone walk-through of the first/second rerank flow.\n",
    "RERANK_THRESHOLD = 19  # minimum relevance_score to accept a citation\n",
    "\n",
    "failed_papers_id = []\n",
    "\n",
    "if any(score > 0.5 for score in cos_scores):\n",
    "    rerank_papers = [paper for idx, paper in enumerate(retrieved_papers)\n",
    "                     if cos_scores[idx] > 0.5]\n",
    "    rerank_papers_content = [paper[\"entity\"][\"chunk_text\"] for paper in rerank_papers]\n",
    "    # NOTE(review): `embeding_model` is misspelled but matches the name bound\n",
    "    # earlier in this kernel session -- rename both together when cleaning up.\n",
    "    rerank_results = await embeding_model.rerank_documents(\n",
    "        query=original_statement, documents=rerank_papers_content)\n",
    "    scores = [result[\"relevance_score\"] for result in rerank_results[\"results\"]] if rerank_results else []\n",
    "    logger.info(\"第一次重排序结果: %s\", scores)\n",
    "    # BUG FIX: the original used two independent `if` statements (any > 19 /\n",
    "    # all < 19), which crashed on an empty rerank_results and silently did\n",
    "    # nothing when some score equalled 19; an if/else covers every case.\n",
    "    if scores and any(score > RERANK_THRESHOLD for score in scores):\n",
    "        reference_info = pipeline.processor._process_rerank_results(\n",
    "            rerank_results, rerank_papers, RERANK_THRESHOLD)\n",
    "        logger.info(\"第一次重排序成功\")\n",
    "    else:\n",
    "        # No score cleared the threshold: blacklist these papers, regenerate\n",
    "        # the HYDE and retry retrieval + rerank once more.\n",
    "        failed_papers_id += [r[\"id\"] for r in retrieved_papers]\n",
    "        logger.warning(\n",
    "            \"第一次重排序分数均小于 %f，重新生成 HYDE。\", RERANK_THRESHOLD)\n",
    "        new_hyde, core_questions = await pipeline.processor.gene_statement_hyde.generate_statement_hyde(statements=hyde, keywords=keywords)\n",
    "        rerank_retrieved_paper_tasks = [pipeline.query.query_by_content(\n",
    "            q, top_k=10) for q in [new_hyde] + core_questions]\n",
    "        rerank_retrieved_paper_list = await asyncio.gather(*rerank_retrieved_paper_tasks)\n",
    "\n",
    "        # Deduplicate by paper id, keeping the first occurrence only.\n",
    "        unique_ids = set()\n",
    "        deduped_papers = []\n",
    "        for sublist in rerank_retrieved_paper_list:\n",
    "            for item in sublist:\n",
    "                if item[\"id\"] not in unique_ids:\n",
    "                    unique_ids.add(item[\"id\"])\n",
    "                    deduped_papers.append(item)\n",
    "        rerank_retrieved_paper_list = [\n",
    "            paper for paper in deduped_papers if paper[\"id\"] not in failed_papers_id]\n",
    "        logger.info(\n",
    "            f\"第二次重排序检索到的论文数量: {len(rerank_retrieved_paper_list)}\")\n",
    "        rerank_papers_content = [\n",
    "            x[\"entity\"][\"chunk_text\"] for x in rerank_retrieved_paper_list]\n",
    "        # BUG FIX: the previous version reused a stale name `loop` that had\n",
    "        # been rebound to a function elsewhere in the session, causing\n",
    "        # AttributeError: 'function' object has no attribute 'run_in_executor'\n",
    "        # (see the traceback in this cell's saved output).\n",
    "        rerank_papers_content = await asyncio.get_running_loop().run_in_executor(\n",
    "            None, chunking, rerank_papers_content)\n",
    "        logger.info(\"第二次重排序\")\n",
    "        reference_info = await pipeline.processor._handle_rerank_failure(\n",
    "            statement=original_statement, rerank_papers_content=rerank_papers_content, rerank_papers=rerank_retrieved_paper_list, rerank_threshold=RERANK_THRESHOLD)\n",
    "        print(reference_info)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# FIXME: this cell held the incomplete expression `await pipeline.processor.`\n",
    "# — a dangling attribute access that raises SyntaxError and breaks\n",
    "# Restart & Run All. Disabled until the intended method call is filled in:\n",
    "# await pipeline.processor.<method>(...)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): scratch cell — the free names used below (get_cached_cos_scores,\n",
    "# handle_high_cos_scores, handle_low_cos_scores, rerank_cache, cache_key,\n",
    "# original_statement, papers_content, retrieved_papers, keywords, hyde) are not\n",
    "# defined in any visible cell; presumably leftovers from earlier kernel state.\n",
    "# This cell will fail under Restart & Run All — confirm and either wire it to\n",
    "# process_hyde_with_cache below or remove it.\n",
    "\n",
    "\n",
    "# Get or compute the cosine-similarity scores (cached)\n",
    "cos_scores = await get_cached_cos_scores(original_statement, papers_content)\n",
    "\n",
    "# Route the follow-up processing on the cosine scores\n",
    "if any(score > 0.5 for score in cos_scores):\n",
    "    # At least one similarity exceeds the threshold: high-similarity handler\n",
    "    result = await handle_high_cos_scores(\n",
    "        original_statement, retrieved_papers, cos_scores, keywords, hyde\n",
    "    )\n",
    "else:\n",
    "    # Otherwise fall back to the low-similarity handler\n",
    "    result = await handle_low_cos_scores(\n",
    "        hyde, keywords, retrieved_papers\n",
    "    )\n",
    "\n",
    "# Cache the processing result\n",
    "rerank_cache[cache_key] = result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "async def process_hyde_with_cache(self, hyde: str, original_statement: str,\n",
    "                                        evidence_index: List[int], retrieved_papers: List[Dict[str, Any]],\n",
    "                                        keywords: str, merge_section: str) -> Tuple[Optional[int], Optional[str]]:\n",
    "    \"\"\"Process a single HYDE statement with memoization.\n",
    "\n",
    "    Returns the cached result when one exists for this (hyde, statement,\n",
    "    paper-id set) combination; otherwise computes the result, caches it,\n",
    "    and returns it.\n",
    "\n",
    "    Args:\n",
    "        hyde: hypothetical-document (HYDE) text used for retrieval.\n",
    "        original_statement: the statement being matched against the papers.\n",
    "        evidence_index: not read in this body — TODO confirm callers' intent.\n",
    "        retrieved_papers: candidate papers; each dict must carry an 'id' key.\n",
    "        keywords: search keywords forwarded to the handlers.\n",
    "        merge_section: not read in this body — TODO confirm callers' intent.\n",
    "\n",
    "    Returns:\n",
    "        The handler's result (cached thereafter), or (None, None) on error.\n",
    "    \"\"\"\n",
    "    # Cache key: HYDE text, statement, and the paper ids sorted so that\n",
    "    # retrieval order does not affect cache hits.\n",
    "    cache_key = (hyde, original_statement, tuple(sorted(p['id'] for p in retrieved_papers)))\n",
    "    if cache_key in self._rerank_cache:\n",
    "        logger.info(f\"缓存命中，使用缓存的结果rerank: {cache_key}\")\n",
    "        return self._rerank_cache[cache_key]\n",
    "\n",
    "    try:\n",
    "        # Get or compute the papers' chunk contents (cached)\n",
    "        papers_content = await self._get_cached_papers_content(retrieved_papers)\n",
    "        \n",
    "        # Get or compute the cosine-similarity scores (cached)\n",
    "        cos_scores = await self._get_cached_cos_scores(original_statement, papers_content)\n",
    "        \n",
    "        # Route on the cosine scores\n",
    "        if any(score > self.cos_threshold for score in cos_scores):\n",
    "            # At least one similarity exceeds the threshold: high-similarity path\n",
    "            result = await self._handle_high_cos_scores(\n",
    "                original_statement, retrieved_papers, cos_scores, keywords, hyde\n",
    "            )\n",
    "        else:\n",
    "            # Otherwise: low-similarity path\n",
    "            result = await self._handle_low_cos_scores(\n",
    "                hyde, keywords, retrieved_papers\n",
    "            )\n",
    "        \n",
    "        # Cache the result; the exception path below skips this write,\n",
    "        # so failures are never cached.\n",
    "        self._rerank_cache[cache_key] = result\n",
    "        return result\n",
    "        \n",
    "    except Exception as e:\n",
    "        logger.error(f\"处理HYDE时发生错误: {str(e)}\")\n",
    "        return None, None\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "await process_drafts_in_batches(pipeline.parsed_draft_infos[:2], batch_size=5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pipeline.parsed_draft_infos[:1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.pipeline_reference:开始处理引用pipeline\n",
      "INFO:research_agent.core.pipeline_reference:已合并段落\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:research_agent.core.pipeline_reference:已完成find_statement\n",
      "INFO:research_agent.core.pipeline_reference:已完成prepare_draft_info\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The technological roadmap of multi-model large models is a critical area of research that has gained significant attention due to its potential to revolutionize various fields, including natural language processing, computer vision, and robotics. This research survey aims to analyze the evolution of multi-modal large models, evaluate the methodological approaches and architectures employed, and propose future directions for research and development.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The scope of this survey encompasses the foundational theories of multi-modal learning, the breakthroughs and paradigm shifts that have shaped the field, and the recent advancements and emerging trends. By synthesizing key literature developments and identifying gaps, this survey will provide a comprehensive overview of the current state of multi-modal large models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The methodology of this survey involves a critical analysis of existing research, the identification of key trends and patterns, and the formulation of research questions and hypotheses. The temporal scope of this survey is from the early developments of multi-modal learning to the most recent advancements in the field.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The unique angle of this survey is to focus on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models. This survey will contribute to the field by providing a comprehensive overview of the current state of multi-modal large models, identifying key trends and patterns, and proposing future directions for research and development.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The origins of multi-modal learning stem from the acknowledgment of the shortcomings of unimodal methods in dealing with the intricacies of real-world data, leading scholars to recognize the potential of combining information from various modalities—such as text, images, and audio—to create more resilient and precise models.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The foundational theories for multi-modal learning were established on the concepts of representation learning, fusion techniques, and co-learning methodologies, with representation learning facilitated by methods like autoencoders and disentanglement networks being instrumental in extracting significant features from each modality.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Fusion strategies, encompassing early fusion, late fusion, and attention mechanisms, were introduced to merge representations from diverse modalities effectively, enhancing model performance by harnessing the complementary nature of each modality's information.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: Co-learning strategies, including privileged information and multi-task learning, bolstered the abilities of multi-modal models, enabling the intermodal transfer of knowledge and allowing models to learn from the strengths of one modality to offset the weaknesses of another.\n",
      "INFO:research_agent.core.reference_processor:开始处理 HYDE: The convergence of these foundational theories provided the framework for further advancements in multi-modal learning and the emergence of large-scale multi-modal models.\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:research_agent.core.reference_processor:尝试次数: 1\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.58587381 0.51845207 0.56196483 0.49807235 0.53176343 0.51458254\n",
      " 0.53216691 0.55208212 0.54689684 0.46192081]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53031523 0.45881518 0.54204345 0.61458308 0.56663313 0.46857621\n",
      " 0.48516067 0.57058875 0.60652062 0.51397823 0.51641948 0.58076887]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.60376185 0.58412282 0.58429845 0.61583517 0.61113319 0.59661344\n",
      " 0.58986448 0.55464198 0.61247115 0.57735442 0.55111129 0.60033012\n",
      " 0.59555719]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.54529988 0.55161416 0.56257724 0.51808356 0.46507362 0.50307193\n",
      " 0.46280139 0.4756837  0.5077353  0.51551641]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用 get_embeddings 生成向量失败: Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.54563595 0.56044475 0.55750084 0.55490378 0.58167078 0.49228194\n",
      " 0.49604255 0.60312297 0.55827183 0.47692702]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成向量失败 (第 1 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.50960082 0.50962362 0.4879396  0.51556546 0.48710166 0.4880463\n",
      " 0.46489486 0.52004879 0.50261356 0.50516689 0.4520402  0.48712993]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 400 Bad Request\"\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.40437058 0.52389401 0.54171081 0.52752386 0.52580874 0.53920667\n",
      " 0.53476941 0.4647884  0.52541142 0.43412274]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.53116569 0.66474165 0.62619294 0.60157998 0.61974996 0.59755191\n",
      " 0.57568585 0.57143173 0.46771741 0.62595327 0.52876827]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.8125, 19.484375, 17.828125, 17.65625, 17.625, 17.453125, 15.4921875, 15.046875, 14.953125]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 5.34 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.5625, 18.140625, 18.109375, 18.046875, 17.125, 16.765625, 15.5234375]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "生成向量失败 (第 2 次): Error code: 400, with error text {\"error\":{\"code\":\"1210\",\"message\":\"API 调用参数有误，请检查文档。Request Entity Too Large\"}}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.65625, 18.015625, 17.890625, 17.25, 17.125, 15.9140625, 14.2109375]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.859375, 20.703125, 20.5, 19.78125, 19.484375, 19.4375, 19.3125, 18.90625, 18.875, 18.609375, 18.265625, 18.25, 16.09375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 8.55 秒\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "超过最大重试次数，返回零向量\n",
      "生成向量完成，共生成 10 个向量\n",
      "生成向量失败 [0] 个\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "d:\\GoodStudy\\FX15_reference_2\\summary-generation-match\\research_agent\\core\\embedding_model.py:36: RuntimeWarning: invalid value encountered in scalar divide\n",
      "  return dot_product / (norm_a * norm_b)\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [       nan 0.50679038 0.57201003 0.53099265 0.51193586 0.55454683\n",
      " 0.54489166 0.58397886 0.53594281 0.52518443]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.21875, 19.0625, 17.71875, 17.359375, 17.1875, 14.921875]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 9.23 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [18.953125, 16.84375, 15.7109375, 15.671875, 15.625, 15.234375, 14.703125]\n",
      "WARNING:research_agent.core.reference_processor:第一次重排序分数均小于 19.000000，重新生成 HYDE。\n",
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [20.1875, 19.890625, 18.734375, 18.515625, 18.453125, 18.4375, 17.703125, 17.546875, 15.359375]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 10.78 秒\n",
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [19.09375, 19.0625, 18.875, 18.4375, 18.3125, 18.234375, 18.09375, 17.1875, 16.203125, 16.0625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 10.78 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The paper delves into the profound impact of foundational theories on the evolution of multi-modal learning, a domain witnessing significant breakthroughs and paradigm shifts. By critically examining recent advancements, the study elucidates how emerging trends in multi-modal learning are reshaping the landscape of artificial intelligence. The analysis extends beyond mere documentation of these trends, exploring their theoretical underpinnings and methodological innovations. This exploration reveals critical research gaps that persist despite the literature developments in the field. The paper argues that a holistic understanding of these foundational theories is essential for bridging these gaps and fostering future innovations. Integrating insights from seminal works, the study provides a comprehensive framework for advancing multi-modal learning, emphasizing the need for interdisciplinary approaches and continuous theoretical refinement. ['How do foundational theories influence the recent advancements in multi-modal learning?', 'What paradigm shifts are emerging in the field of multi-modal learning, and how do they address existing research gaps?', 'How can a deeper understanding of foundational theories contribute to bridging the identified research gaps in multi-modal learning literature?']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The integration of foundational theories into the realm of multi-modal learning represents a pivotal advancement in the development of large-scale models. This paper delves into the theoretical underpinnings that bridge classical cognitive theories with contemporary artificial intelligence methodologies. Specifically, it examines how foundational theories, such as those proposed by Piaget and Vygotsky, can inform the design and optimization of multi-modal learning frameworks. These frameworks, which amalgamate various data types like text, image, and audio, are essential for the robustness and versatility of large-scale models. The paper extends this theoretical discourse by exploring the empirical dimensions of implementing these theories in real-world AI applications. Methodologically, it scrutinizes the challenges and opportunities inherent in harmonizing diverse theoretical perspectives within a unified computational model. Empirical analyses are presented to demonstrate the enhanced performance and interpretability of large-scale models when grounded in these foundational theories. The findings suggest that a synergistic approach, blending cognitive insights with advanced AI techniques, can significantly elevate the efficacy and generalizability of multi-modal learning systems. ['How do foundational theories inform the design and optimization of multi-modal learning frameworks?', 'What are the methodological challenges in integrating diverse theoretical perspectives within large-scale AI models?', 'How does the empirical implementation of foundational theories enhance the performance and interpretability of large-scale models?']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:尝试次数: 2\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第二次重排序检索到的论文数量: 21\n",
      "This paper delves into the nuanced interplay between methodology and critical analysis within the realm of artificial intelligence research. By scrutinizing existing research, the study identifies key trends and patterns that have shaped the field's evolution. The temporal scope of the analysis spans from early developments to contemporary advancements, highlighting the transformative impact of methodological innovations. Central to this exploration are the research questions and hypotheses that have driven empirical inquiries, offering a comprehensive understanding of the theoretical underpinnings and practical implications. The paper extends theoretical implications by examining how methodological choices influence the formulation and validation of research questions, thereby providing critical insights into the epistemological foundations of AI research. This analysis not only synthesizes current knowledge but also proposes directions for future investigations, emphasizing the importance of a holistic approach that integrates both theoretical depth and empirical rigor. ['How do methodological choices influence the formulation and validation of research questions in AI?', 'What key trends and patterns in existing AI research highlight the evolution of the field?', 'How has the temporal scope from early developments to current practices shaped the theoretical and empirical dimensions of AI research?']\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:research_agent.core.reference_processor:第二次重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:开始处理第二次重排序失败的情况\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Request failed: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n",
      "第二次重排序检索到的论文数量: 35\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:research_agent.core.reference_processor:第二次重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:开始处理第二次重排序失败的情况\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第二次重排序检索到的论文数量: 38\n",
      "Request failed: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 20.74 秒\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第三次重排序结果: [18.59375, 18.09375, 17.828125, 17.203125, 16.375, 15.5, 18.078125, 17.921875, 17.671875, 17.234375, 16.859375, 15.7890625, 17.265625, 17.03125, 16.296875, 16.171875, 15.921875, 15.5390625, 16.984375, 16.46875, 15.40625]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:research_agent.core.reference_processor:第二次重排序失败: 'NoneType' object is not subscriptable\n",
      "INFO:research_agent.core.reference_processor:开始处理第二次重排序失败的情况\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Request failed: 400, message='Attempt to decode JSON with unexpected mimetype: ', url='https://open.bigmodel.cn/api/paas/v4/rerank'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.reference_processor:计算的余弦分数: [0.57203581 0.61139473 0.61745487 0.47565586 0.57184758 0.57184758\n",
      " 0.54894983 0.57358253 0.5710498  0.59816408 0.50989878 0.573993\n",
      " 0.55463851 0.51066698 0.48958925 0.48428716 0.55955851 0.56682485\n",
      " 0.53966064 0.48017067 0.49533039 0.56880539 0.52064252]\n",
      "INFO:research_agent.core.reference_processor:开始重排序\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 21.93 秒\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 21.96 秒\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第三次重排序结果: [18.109375, 17.859375, 17.5, 17.0625, 16.828125, 16.75, 21.953125, 19.96875, 18.90625, 17.5, 16.765625, 14.4375, 19.34375, 17.765625, 17.125, 16.390625, 16.0625, 14.6484375, 18.046875, 17.796875, 15.7890625, 13.6015625, 12.5859375, 10.09375, 18.234375, 18.09375, 16.984375, 16.90625, 15.5390625, 14.8515625, 17.15625, 16.375, 16.21875, 14.1015625, 12.6640625]\n",
      "第三次重排序结果: [13.546875, 13.1796875, 13.1796875, 11.734375, 11.25, 11.2421875, 16.265625, 15.25, 12.453125, 11.7734375, 11.6953125, 11.40625, 18.53125, 15.1171875, 13.9921875, 13.21875, 12.65625, 12.0, 17.1875, 16.953125, 15.0078125, 13.765625, 12.6640625, 11.984375, 18.40625, 17.90625, 15.0234375, 13.6875, 13.5859375, 12.3828125, 15.0859375, 14.71875, 14.609375, 13.6640625, 13.515625, 12.71875, 14.625, 14.5]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.reference_processor:第一次重排序结果: [21.234375, 20.78125, 20.390625, 19.9375, 19.890625, 19.875, 19.75, 19.75, 19.296875, 19.1875, 19.03125, 18.890625, 18.5, 18.5, 17.90625, 17.734375, 17.609375, 16.515625]\n",
      "INFO:research_agent.core.reference_processor:第一次重排序成功\n",
      "INFO:research_agent.core.reference_processor:处理 HYDE 完成，耗时: 22.80 秒\n",
      "INFO:research_agent.core.pipeline_reference:已完成process_drafts\n",
      "INFO:research_agent.core.pipeline_reference:已完成update_sections\n",
      "INFO:research_agent.core.pipeline_reference:已完成replace_citations_with_num\n",
      "INFO:research_agent.core.pipeline_reference:已完成process_final_survey\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "\"\\n## 1 Introduction\\n\\nThe technological roadmap of multi-model large models is a critical area of research that has gained significant attention due to its potential to revolutionize various fields, including natural language processing, computer vision, and robotics. This research survey aims to analyze the evolution of multi-modal large models, evaluate the methodological approaches and architectures employed, and propose future directions for research and development.<sup>1</sup><sup>2</sup> The scope of this survey encompasses the foundational theories of multi-modal learning, the breakthroughs and paradigm shifts that have shaped the field, and the recent advancements and emerging trends. By synthesizing key literature developments and identifying gaps, this survey will provide a comprehensive overview of the current state of multi-modal large models. The methodology of this survey involves a critical analysis of existing research, the identification of key trends and patterns, and the formulation of research questions and hypotheses. The temporal scope of this survey is from the early developments of multi-modal learning to the most recent advancements in the field. The unique angle of this survey is to focus on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models. 
This survey will contribute to the field by providing a comprehensive overview of the current state of multi-modal large models, identifying key trends and patterns, and proposing future directions for research and development.<sup>3</sup><sup>4</sup><sup>5</sup><sup>6</sup><sup>7</sup><sup>8</sup><sup>9</sup><sup>10</sup><sup>11</sup><sup>12</sup><sup>13</sup>\\n\\n ## 2 Evolution of Multi-Modal Large Models\\n### 2.1 Early Developments and Foundational Theories\\n\\nThe origins of multi-modal learning stem from the acknowledgment of the shortcomings of unimodal methods in dealing with the intricacies of real-world data. Scholars recognized the potential of combining information from various modalities—such as text, images, and audio—to create more resilient and precise models.<sup>14</sup><sup>15</sup><sup>16</sup><sup>17</sup><sup>18</sup><sup>19</sup> The foundational theories for multi-modal learning were established on the concepts of representation learning, fusion techniques, and co-learning methodologies.\\nRepresentation learning, facilitated by methods like autoencoders and disentanglement networks, was instrumental in extracting significant features from each modality. These techniques enabled the creation of representations that captured the data's intrinsic characteristics and promoted knowledge transfer across modalities. Fusion strategies, encompassing early fusion, late fusion, and attention mechanisms, were introduced to merge representations from diverse modalities effectively. These strategies enhanced model performance by harnessing the complementary nature of each modality's information.<sup>20</sup><sup>21</sup>\\nCo-learning strategies, including privileged information and multi-task learning, bolstered the abilities of multi-modal models. 
These approaches enabled the intermodal transfer of knowledge, allowing models to learn from the strengths of one modality to offset the weaknesses of another.<sup>22</sup><sup>23</sup> The convergence of these foundational theories provided the framework for further advancements in multi-modal learning and the emergence of large-scale multi-modal models.<sup>24</sup><sup>25</sup><sup>26</sup>\\n\\n# References\\n\\n[1] DetGPT: Detect What You Need Via Reasoning.Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db,2023, chunk 1\\n[2] DetGPT: Detect What You Need Via Reasoning.Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db,2023, chunk 2\\n[3] Quantifying and Enhancing Multi-modal Robustness with Modality Preference.Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db,2024, chunk 0\\n[4] Mplug-Owl2: Revolutionizing Multi-modal Large Language Model with Modality Collaboration.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 0\\n[5] SHAPE: an Unified Approach to Evaluate the Contribution and Cooperation of Individual Modalities.Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db,2022, chunk 0\\n[6] GTP-4o: Modality-prompted Heterogeneous Graph Learning for Omni-modal Biomedical Representation.Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db,2024, chunk 1\\n[7] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 1\\n[8] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 0\\n[9] MOTRv2: Bootstrapping End-to-End Multi-Object Tracking by Pretrained Object Detectors.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 0\\n[10] Towards All-in-one Pre-training Via Maximizing Multi-modal Mutual 
Information.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 0\\n[11] PixelLM: Pixel Reasoning with Large Multimodal Model.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 1\\n[12] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 9\\n[13] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 4\\n[14] Quantifying and Enhancing Multi-modal Robustness with Modality Preference.Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db,2024, chunk 1\\n[15] Gradient-Guided Modality Decoupling for Missing-Modality Robustness.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 1\\n[16] MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 1\\n[17] Gradient-Guided Modality Decoupling for Missing-Modality Robustness.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 2\\n[18] Boosting Multi-modal Model Performance with Adaptive Gradient Modulation.Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db,2023, chunk 1\\n[19] Diagnosing and Re-learning for Balanced Multimodal Learning.Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db,2024, chunk 2\\n[20] Multimodal Representation Learning by Alternating Unimodal Adaptation.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 3\\n[21] ETDPC: A Multimodality Framework for Classifying Pages in Electronic Theses and Dissertations.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 1\\n[22] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 7\\n[23] Human Action Recognition from 
Various Data Modalities: A Review.Journal_Paper_Meta_Data_IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_with_whole_text.db,2023, chunk 18\\n[24] All in One Framework for Multimodal Re-identification in the Wild.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 5\\n[25] Mplug-2: A Modularized Multi-modal Foundation Model Across Text, Image and Video.Conf_Paper_Meta_Data_ICML_2023_with_whole_text.db,2023, chunk 0\\n[26] FedDAT: an Approach for Foundation Model Finetuning in Multi-Modal Heterogeneous Federated Learning.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 0\""
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Run the reference-matching pipeline on the draft. The result (see output\n",
    "# below) is the survey text with <sup>n</sup> citation markers, <ss>score</ss>\n",
    "# similarity annotations, and a renumbered References section.\n",
    "final_survey = await pipeline.pipeline_reference()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preview the generated survey text.\n",
    "print(final_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload a previously saved survey (sections 1-2) from disk into final_survey,\n",
    "# so the inspection cells below can run without re-executing the pipeline.\n",
    "# NOTE(review): relative path -- assumes the notebook's working directory\n",
    "# contains 1-2sections.md; confirm before a fresh run.\n",
    "with open(\"1-2sections.md\", \"r\", encoding=\"utf-8\") as file:\n",
    "    final_survey = file.read()\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 核对 (Verification: cross-check citations against source chunks)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['The technological roadmap of multi-model large models is a critical area of research that has gained significant attention due to its potential to revolutionize various fields, including natural language processing, computer vision, and robotics. This research survey aims to analyze the evolution of multi-modal large models, evaluate the methodological approaches and architectures employed, and propose future directions for research and development.',\n",
       " 'The scope of this survey encompasses the foundational theories of multi-modal learning, the breakthroughs and paradigm shifts that have shaped the field, and the recent advancements and emerging trends. By synthesizing key literature developments and identifying gaps, this survey will provide a comprehensive overview of the current state of multi-modal large models.',\n",
       " 'The methodology of this survey involves a critical analysis of existing research, the identification of key trends and patterns, and the formulation of research questions and hypotheses. The temporal scope of this survey is from the early developments of multi-modal learning to the most recent advancements in the field.',\n",
       " 'The unique angle of this survey is to focus on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models. This survey will contribute to the field by providing a comprehensive overview of the current state of multi-modal large models, identifying key trends and patterns, and proposing future directions for research and development.']"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the sentence groups parsed from the first draft section under the\n",
    "# \"hyde\" key -- presumably HyDE-style hypothetical passages used as retrieval\n",
    "# queries (TODO confirm against pipeline implementation).\n",
    "pipeline.parsed_draft_infos[0][\"hyde\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "## 1 Introduction\n",
      "\n",
      "The technological roadmap of multi-model large models is a critical area of research that has gained significant attention due to its potential to revolutionize various fields, including natural language processing, computer vision, and robotics. This research survey aims to analyze the evolution of multi-modal large models, evaluate the methodological approaches and architectures employed, and propose future directions for research and development.<sup>1</sup><ss>19.21875</ss><sup>2</sup><ss>19.0625</ss> The scope of this survey encompasses the foundational theories of multi-modal learning, the breakthroughs and paradigm shifts that have shaped the field, and the recent advancements and emerging trends. By synthesizing key literature developments and identifying gaps, this survey will provide a comprehensive overview of the current state of multi-modal large models.<sup>3</sup><ss>20.5625</ss><sup>4</sup><ss>19.578125</ss><sup>5</sup><ss>19.359375</ss> The methodology of this survey involves a critical analysis of existing research, the identification of key trends and patterns, and the formulation of research questions and hypotheses. The temporal scope of this survey is from the early developments of multi-modal learning to the most recent advancements in the field. The unique angle of this survey is to focus on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models. This survey will contribute to the field by providing a comprehensive overview of the current state of multi-modal large models, identifying key trends and patterns, and proposing future directions for research and development.<sup>6</sup><ss>19.796875</ss><sup>7</sup><ss>19.546875</ss><sup>8</sup><ss>21.78125</ss><sup>9</sup><ss>20.859375</ss><sup>10</sup><ss>20.6875</ss><sup>11</sup><ss>20.125</ss><sup>12</sup><ss>19.921875</ss><sup>13</sup><ss>19.546875</ss>\n",
      "\n",
      " ## 2 Evolution of Multi-Modal Large Models\n",
      "### 2.1 Early Developments and Foundational Theories\n",
      "\n",
      "The origins of multi-modal learning stem from the acknowledgment of the shortcomings of unimodal methods in dealing with the intricacies of real-world data. Scholars recognized the potential of combining information from various modalities—such as text, images, and audio—to create more resilient and precise models.<sup>14</sup><ss>20.859375</ss><sup>15</sup><ss>20.703125</ss><sup>5</sup><ss>20.5</ss><sup>6</sup><ss>19.78125</ss><sup>16</sup><ss>19.484375</ss><sup>17</sup><ss>19.4375</ss><sup>18</sup><ss>19.3125</ss> The foundational theories for multi-modal learning were established on the concepts of representation learning, fusion techniques, and co-learning methodologies.\n",
      "Representation learning, facilitated by methods like autoencoders and disentanglement networks, was instrumental in extracting significant features from each modality. These techniques enabled the creation of representations that captured the data's intrinsic characteristics and promoted knowledge transfer across modalities.<sup>16</sup><ss>19.109375</ss><sup>18</sup><ss>19.109375</ss> Fusion strategies, encompassing early fusion, late fusion, and attention mechanisms, were introduced to merge representations from diverse modalities effectively. These strategies enhanced model performance by harnessing the complementary nature of each modality's information.<sup>19</sup><ss>20.1875</ss><sup>20</sup><ss>19.890625</ss>\n",
      "Co-learning strategies, including privileged information and multi-task learning, bolstered the abilities of multi-modal models. These approaches enabled the intermodal transfer of knowledge, allowing models to learn from the strengths of one modality to offset the weaknesses of another.<sup>21</sup><ss>19.8125</ss><sup>22</sup><ss>19.484375</ss> The convergence of these foundational theories provided the framework for further advancements in multi-modal learning and the emergence of large-scale multi-modal models.<sup>11</sup><ss>19.96875</ss><sup>23</sup><ss>19.34375</ss><sup>5</sup><ss>19.46875</ss>\n",
      "### 2.2 Breakthroughs and Paradigm Shifts\n",
      "\n",
      "The field of multi-modal large models has seen significant breakthroughs and paradigm shifts. The integration of deep learning, especially the transformer architecture, has been a pivotal advancement. This integration has enhanced models' ability to process and learn from diverse data modalities, improving performance and generalization.\n",
      "Developments in fusion strategies have also been crucial. Early fusion techniques merge data from multiple modalities at the initial stage, while late fusion methods process each modality separately before combining their representations. These strategies allow for a more nuanced understanding of each modality's strengths.<sup>17</sup><ss>19.890625</ss><sup>19</sup><ss>19.859375</ss><sup>24</sup><ss>19.453125</ss><sup>25</sup><ss>19.109375</ss>\n",
      "The advent of large-scale pre-trained models like CLIP and DALL-E has transformed the field. These foundational models can be fine-tuned for specific tasks and have shown impressive capabilities in zero-shot learning, image generation, and other complex multimodal tasks.<sup>26</sup><ss>21.578125</ss><sup>27</sup><ss>20.953125</ss><sup>28</sup><ss>20.578125</ss><sup>29</sup><ss>20.46875</ss><sup>30</sup><ss>20.140625</ss><sup>31</sup><ss>20.015625</ss><sup>32</sup><ss>19.453125</ss>\n",
      "These breakthroughs and paradigm shifts have led to the creation of more sophisticated models capable of effectively integrating and utilizing information from various modalities. These advancements are driving the development of new techniques and applications, positioning multi-modal large models for a central role in future technologies.<sup>6</sup><ss>19.03125</ss>\n",
      "### 2.3 Recent Advancements and Emerging Trends\n",
      "\n",
      "Recent advancements in multi-modal large models have centered on integrating diverse modalities and developing innovative training techniques. These models are designed to understand and process information from various sources, such as text, images, and audio, with enhanced efficiency. Notably, the adoption of contrastive learning objectives, exemplified by OpenAI’s CLIP, has led to significant performance improvements in zero-shot classification and robustness against distribution shifts.<sup>33</sup><ss>22.25</ss><sup>26</sup><ss>21.6875</ss><sup>27</sup><ss>21.671875</ss><sup>28</sup><ss>21.5625</ss><sup>29</sup><ss>21.28125</ss><sup>34</sup><ss>20.90625</ss><sup>35</sup><ss>20.40625</ss><sup>36</sup><ss>20.3125</ss><sup>37</sup><ss>20.015625</ss><sup>31</sup><ss>19.890625</ss><sup>38</sup><ss>19.4375</ss><sup>39</sup><ss>19.421875</ss><sup>40</sup><ss>19.21875</ss> In the realm of image generation, models like DALL-E have showcased the capability of text-guided image generation, producing high-quality images from textual descriptions.\n",
      "The progress in these areas has been fueled by the availability of large-scale datasets, such as those containing hundreds of millions or billions of image-text pairs.<sup>41</sup><ss>22.921875</ss><sup>42</sup><ss>21.9375</ss><sup>43</sup><ss>21.265625</ss><sup>44</sup><ss>20.046875</ss><sup>45</sup><ss>19.859375</ss><sup>46</sup><ss>19.703125</ss><sup>47</sup><ss>19.65625</ss> The LAION-5B dataset, with over 5.8 billion examples, has been introduced to overcome the limitations posed by the scarcity of publicly available datasets, enabling the research community to audit and refine such extensive collections.<sup>26</sup><ss>23.53125</ss><sup>48</sup><ss>22.171875</ss><sup>49</sup><ss>22.125</ss><sup>50</sup><ss>21.25</ss><sup>51</sup><ss>20.84375</ss><sup>52</sup><ss>19.375</ss>\n",
      "Future research directions will involve examining the ethical implications of large-scale data collection and addressing the challenges associated with training and deploying these models in real-world scenarios. Exploring the potential of new modalities, like haptic or olfactory data, and developing more efficient and scalable model architectures will also be key to advancing multi-modal large models.\n",
      "\n",
      "# References\n",
      "\n",
      "[1] DetGPT: Detect What You Need Via Reasoning.Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db,2023, chunk 1\n",
      "[2] DetGPT: Detect What You Need Via Reasoning.Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db,2023, chunk 2\n",
      "[3] All in One Framework for Multimodal Re-identification in the Wild.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 5\n",
      "[4] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 9\n",
      "[5] MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 1\n",
      "[6] GTP-4o: Modality-prompted Heterogeneous Graph Learning for Omni-modal Biomedical Representation.Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db,2024, chunk 1\n",
      "[7] Multi-sensor Learning Enables Information Transfer Across Different Sensory Data and Augments Multi-modality Imaging.Journal_Paper_Meta_Data_IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_with_whole_text.db,2024, chunk 1\n",
      "[8] Quantifying and Enhancing Multi-modal Robustness with Modality Preference.Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db,2024, chunk 0\n",
      "[9] Mplug-Owl2: Revolutionizing Multi-modal Large Language Model with Modality Collaboration.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 0\n",
      "[10] SHAPE: an Unified Approach to Evaluate the Contribution and Cooperation of Individual Modalities.Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db,2022, chunk 0\n",
      "[11] Mplug-2: A Modularized Multi-modal Foundation Model Across Text, Image and Video.Conf_Paper_Meta_Data_ICML_2023_with_whole_text.db,2023, chunk 0\n",
      "[12] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 1\n",
      "[13] Omni-SMoLA: Boosting Generalist Multimodal Models with Soft Mixture of Low-rank Experts.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 0\n",
      "[14] Quantifying and Enhancing Multi-modal Robustness with Modality Preference.Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db,2024, chunk 1\n",
      "[15] Gradient-Guided Modality Decoupling for Missing-Modality Robustness.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 1\n",
      "[16] Gradient-Guided Modality Decoupling for Missing-Modality Robustness.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 2\n",
      "[17] Boosting Multi-modal Model Performance with Adaptive Gradient Modulation.Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db,2023, chunk 1\n",
      "[18] Diagnosing and Re-learning for Balanced Multimodal Learning.Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db,2024, chunk 2\n",
      "[19] Multimodal Representation Learning by Alternating Unimodal Adaptation.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 3\n",
      "[20] ETDPC: A Multimodality Framework for Classifying Pages in Electronic Theses and Dissertations.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 1\n",
      "[21] Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 7\n",
      "[22] Human Action Recognition from Various Data Modalities: A Review.Journal_Paper_Meta_Data_IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_with_whole_text.db,2023, chunk 18\n",
      "[23] FedDAT: an Approach for Foundation Model Finetuning in Multi-Modal Heterogeneous Federated Learning.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 0\n",
      "[24] COLD Fusion: Calibrated and Ordinal Latent Distribution Fusion for Uncertainty-Aware Multimodal Emotion Recognition.Journal_Paper_Meta_Data_IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_with_whole_text.db,2023, chunk 3\n",
      "[25] 3D Object Detection for Autonomous Driving: A Comprehensive Survey.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2023, chunk 17\n",
      "[26] LAION-5B: an Open Large-Scale Dataset for Training Next Generation Image-Text Models..Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db,2022, chunk 1\n",
      "[27] EVA: Exploring the Limits of Masked Visual Representation Learning at Scale.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 6\n",
      "[28] Understanding Transferable Representation Learning and Zero-shot Transfer in CLIP.Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db,2024, chunk 0\n",
      "[29] Reproducible Scaling Laws for Contrastive Language-Image Learning.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 1\n",
      "[30] Domain-Inspired Sharpness-Aware Minimization under Domain Shifts.Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db,2024, chunk 4\n",
      "[31] Multimodality Helps Unimodality: Cross-Modal Few-Shot Learning with Multimodal Models.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 2\n",
      "[32] DeIl: Direct and Inverse CLIP for Open-World Few-Shot Learning.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 1\n",
      "[33] Understanding Zero-Shot Adversarial Robustness for Large-Scale Models.Conf_Paper_Meta_Data_ICLR_2023_with_whole_text.db,2023, chunk 0\n",
      "[34] Understanding Transferable Representation Learning and Zero-shot Transfer in CLIP.Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db,2024, chunk 1\n",
      "[35] Non-Contrastive Learning Meets Language-Image Pre-Training.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 0\n",
      "[36] Understanding Zero-Shot Adversarial Robustness for Large-Scale Models.Conf_Paper_Meta_Data_ICLR_2023_with_whole_text.db,2023, chunk 3\n",
      "[37] Supervision Exists Everywhere: A Data Efficient Contrastive Language-Image Pre-training Paradigm.Conf_Paper_Meta_Data_ICLR_2022_International_Conference_on_Learning_Representation_with_whole_text.db,2022, chunk 1\n",
      "[38] CleanCLIP: Mitigating Data Poisoning Attacks in Multimodal Contrastive Learning..Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db,2023, chunk 2\n",
      "[39] CLAP: Isolating Content from Style Through Contrastive Learning with Augmented Prompts.Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db,2024, chunk 5\n",
      "[40] Improving Medical Multi-modal Contrastive Learning with Expert Annotations.Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db,2024, chunk 2\n",
      "[41] DALL-Eval: Probing the Reasoning Skills and Social Biases of Text-to-Image Generation Models.Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db,2023, chunk 1\n",
      "[42] DALL-Eval: Probing the Reasoning Skills and Social Biases of Text-to-Image Generation Models.Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db,2023, chunk 0\n",
      "[43] Compositional Visual Generation with Composable Diffusion Models..Conf_Paper_Meta_Data_ECCV_2022_European_Conference_on_Computer_Vision_with_whole_text.db,2022, chunk 0\n",
      "[44] Image Content Generation with Causal Reasoning.Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db,2024, chunk 2\n",
      "[45] CosmicMan: A Text-to-Image Foundation Model for Humans.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 2\n",
      "[46] Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding..Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db,2022, chunk 4\n",
      "[47] DALL-Eval: Probing the Reasoning Skills and Social Biases of Text-to-Image Generation Models.Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db,2023, chunk 5\n",
      "[48] LAION-5B: an Open Large-Scale Dataset for Training Next Generation Image-Text Models..Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db,2022, chunk 0\n",
      "[49] LAION-5B: an Open Large-Scale Dataset for Training Next Generation Image-Text Models..Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db,2022, chunk 5\n",
      "[50] LAION-5B: an Open Large-Scale Dataset for Training Next Generation Image-Text Models..Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db,2022, chunk 7\n",
      "[51] LAION-5B: an Open Large-Scale Dataset for Training Next Generation Image-Text Models..Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db,2022, chunk 6\n",
      "[52] LAION-5B: an Open Large-Scale Dataset for Training Next Generation Image-Text Models..Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db,2022, chunk 14\n"
     ]
    }
   ],
   "source": [
    "# Display the matched survey: citation markers now carry <ss>similarity</ss> scores.\n",
    "print(final_survey)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Query provides lookup of paper chunks by title (query_by_title_like, used\n",
    "# in the verification cell below).\n",
    "from research_agent.core.query import Query\n",
    "query = Query()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The scope of this survey encompasses the foundational theories of multi-modal learning, the breakthroughs and paradigm shifts that have shaped the field, and the recent advancements and emerging trends. By synthesizing key literature developments and identifying gaps, this survey will provide a comprehensive overview of the current state of multi-modal large models.<sup>3</sup><ss>20.5625</ss><sup>4</sup><ss>19.578125</ss><sup>5</sup><ss>19.359375</ss> "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\nThe methodology of this survey involves a critical analysis of existing research, the identification of key trends and patterns, and the formulation of research questions and hypotheses. The temporal scope of this survey is from the early developments of multi-modal learning to the most recent advancements in the field. The unique angle of this survey is to focus on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models. This survey will contribute to the field by providing a comprehensive overview of the current state of multi-modal large models, identifying key trends and patterns, and proposing future directions for research and development.<sup>6</sup><ss>19.796875</ss><sup>7</sup><ss>19.546875</ss><sup>8</sup><ss>21.78125</ss><sup>9</sup><ss>20.859375</ss><sup>10</sup><ss>20.6875</ss><sup>11</sup><ss>20.125</ss><sup>12</sup><ss>19.921875</ss><sup>13</sup><ss>19.546875</ss>\\n'"
      ]
     },
     "execution_count": 94,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "statement = '''\n",
    "The methodology of this survey involves a critical analysis of existing research, the identification of key trends and patterns, and the formulation of research questions and hypotheses. The temporal scope of this survey is from the early developments of multi-modal learning to the most recent advancements in the field. The unique angle of this survey is to focus on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models. This survey will contribute to the field by providing a comprehensive overview of the current state of multi-modal large models, identifying key trends and patterns, and proposing future directions for research and development.<sup>6</sup><ss>19.796875</ss><sup>7</sup><ss>19.546875</ss><sup>8</sup><ss>21.78125</ss><sup>9</sup><ss>20.859375</ss><sup>10</sup><ss>20.6875</ss><sup>11</sup><ss>20.125</ss><sup>12</sup><ss>19.921875</ss><sup>13</sup><ss>19.546875</ss>\n",
    "'''\n",
    "# Pull the trailing citation cluster (a run of <sup>ref</sup><ss>score</ss> pairs)\n",
    "# out of the statement, then split it into (ref_number, score) tuples.\n",
    "# BUG FIX: the original passed an undefined name `a` to re.findall -- it only\n",
    "# worked via stale kernel state. `statement`, defined above, is the intended input.\n",
    "re_a = re.findall(r\"(?:<sup>.*?</sup><ss>.*?</ss>)+\", statement)[0]\n",
    "re_aa = re.findall(r\"<sup>(.*?)</sup><ss>(.*?)</ss>\", re_a)\n",
    "context = []\n",
    "for i in re_aa:\n",
    "    # i = (reference number, similarity score); look the number up in the\n",
    "    # reference list parsed earlier (reference_dict: number -> reference line).\n",
    "    paper = reference_dict[i[0]]\n",
    "    title = re.findall(r\".*?\\.\", paper)[0]  # text up to the first period -- NOTE(review): truncates titles containing '.'\n",
    "    chunk_id = re.findall(r\"chunk (\\d+)\", paper)[0]\n",
    "    query = Query()\n",
    "    papers = await query.query_by_title_like(title, top_k=1)\n",
    "    papers = papers[0]\n",
    "    chunk_text = papers[int(chunk_id)][\"chunk_text\"]\n",
    "    context.append(f\"title:{title} chunk_id:{chunk_id}:\\nchunk_text:{chunk_text}\")\n",
    "statement"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['title:All in One Framework for Multimodal Re-identification in the Wild. chunk_id:5:\\nchunk_text:# 4.2. Ablation Study\\nEfficacy of Designed Modules. We first evaluate the effectiveness of the designed components. As evident from Tab. 4 , each introduced cross-modal head proves crucial for the overall performance of our All-in-One (AIO) framework. Specifically, VA head yields the most substantial performance enhanceme he $\\\\mathbf{T}{\\\\rightarrow}\\\\mathbf{R}$ task, CE head plays an important role in the R cross-modal and multimodal tasks. $\\\\mathbf{R}{\\\\rightarrow}\\\\mathbf{R}$ →R task, and FB head improves all Different Foundation Models. We explore various foundation models, including Uni-Perceiver v2 [ 28 ] (Uni), a Vision Transformer (ViT) pre-trained on LuPerson [ 41 ], and the pre-trained image encoder from CLIP [ 48 ]. The performance of these diverse foundation models is presented in Tab. 5 . As discernible from the table, the performance demonstrates an upward trend with the expansion of the pretraining dataset. Notably, despite LuPerson’s exclusive focus on ReID tasks, its performance lags behind other models due to its comparatively smaller size. This discrepancy underscores the pronounced zero-shot performance benefits associated with large-scale pre-trained foundation models. Influence of Multimodality Input. Because our proposed AIO framework supports any combination of diverse modalities as input, we also analyze the influence of different combinations of multimodal inputs on Tri-CUHKPEDES [ 7 ], where the missing IR modality is generated by CA [ 72 ]. As presented in Tab. 6 , our analysis indicates a preference for RGB and text modalities within our framework over other modalities. Furthermore, when the number of input modalities reaches or exceeds three, there is no significant alteration in performance. 
This outcome aligns with expectations, as RGB and Text modalities inherently provide more discriminative details than others.  \\n\\n<html><body><table><tr><td rowspan=\"2\">Type</td><td rowspan=\"2\">Method</td><td rowspan=\"2\">Venue</td><td colspan=\"2\">RR</td><td colspan=\"2\">IR</td><td colspan=\"2\">S→→R</td><td colspan=\"2\">T→R</td></tr><tr><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td></tr><tr><td rowspan=\"3\">Pre-train</td><td>LuPerson-NL [12]</td><td>CVPR22</td><td>24.6*</td><td>11.6*</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>PLIP [88]</td><td>arXiv23</td><td>80.4</td><td>59.7</td><td></td><td></td><td></td><td></td><td>57.7</td><td>-</td></tr><tr><td>APTM [66]</td><td>ACMMM23</td><td>5.3*</td><td>3.5*</td><td>-</td><td>-</td><td>-</td><td>-</td><td>9.6*</td><td>2.7*</td></tr><tr><td rowspan=\"3\">Unimodal</td><td>OSNet-IBN [84]</td><td>ICCV19</td><td>73.0</td><td>44.9</td><td>-</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>M?L [81]</td><td>CVPR21</td><td>78.3</td><td>52.5</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>OSNet-AIN [85]</td><td>TPAMI21</td><td>73.3</td><td>45.8</td><td>-</td><td></td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td rowspan=\"3\">Cross-modal</td><td>AGW [69]</td><td>TPAMI21</td><td>17.3*</td><td>6.9*</td><td>18.2*</td><td>19.1*</td><td></td><td></td><td></td><td></td></tr><tr><td>IRRA [23]</td><td>CVPR23</td><td>66.6*</td><td>40.5*</td><td></td><td></td><td></td><td>-</td><td>30.1*</td><td>25.3*</td></tr><tr><td>UNIReID [7]</td><td>CVPR23</td><td>19.0*</td><td>8.2*</td><td></td><td></td><td>69.8</td><td>73.0</td><td>11.6*</td><td>9.7*</td></tr><tr><td>Multimodal</td><td>AIO (Ours)</td><td>-</td><td>79.6</td><td>59.9</td><td>57.6</td><td>51.9</td><td>70.2</td><td>73.5</td><td>53.4</td><td>43.4</td></tr></table></body></html>\\n\\nTable 7. Zero-shot performance on cross-modal retrieval. 
The best Rank-1 and mAP performance are reported. Results with \\\\* indicate that the experiment results are produced by authors. GW [ 69 ], it is trained on MSMT17 [ 58 ] and LLCM [ 78 ] f $\\\\scriptstyle\\\\mathbf{R}\\\\to\\\\mathbf{R}$ a $\\\\mathrm{I}{\\\\rightarrow}\\\\mathrm{R}$ .For IRRA [ 23 ], it is trained on ICFG-PEDES [ 9 ] for T $\\\\mathrm{T}{\\\\rightarrow}\\\\mathrm{R}$ →R. For UNIReID [ 7 ], it is trained on Tri-ICFG-PEDES [ 7 ] for T $\\\\mathrm{T}{\\\\rightarrow}\\\\mathrm{R}$ →R and R $\\\\scriptstyle\\\\mathrm{R}\\\\to\\\\mathrm{R}$ →R.  \\n\\n<html><body><table><tr><td>Method</td><td>Rank-1</td><td>mAP</td><td>mINP</td></tr><tr><td>UNIReID (T→→R)</td><td>76.8</td><td>80.6</td><td>77.8</td></tr><tr><td>UNIReID (S-→R)</td><td>69.8</td><td>73.0</td><td>68.3</td></tr><tr><td>AIO (T→R)</td><td>78.2</td><td>81.7</td><td>78.4</td></tr><tr><td>AIO (S→→R)</td><td>69.8</td><td>72.8</td><td>68.8</td></tr><tr><td>UNIReID (S+T→R)</td><td>91.4</td><td>91.8</td><td>89.0</td></tr><tr><td>AIO (S+T→R)</td><td>92.1</td><td>92.2</td><td>89.2</td></tr><tr><td>AIO (R+S+T→→R)</td><td>93.6</td><td>93.7</td><td>90.0</td></tr><tr><td>AIO (R+I+S+T→R)</td><td>93.8</td><td>93.7</td><td>90.3</td></tr></table></body></html>\\n\\nTable 8. Zero-shot performance with multimodal input and generalized cross-modal on PKU-Sketch.\\n\\n# 4.3. Evaluation on Multimodal ReID\\nGiven the rarity of generalizable works across cross-modal, multimodal, and pre-trained ReID, we conduct a comprehensive comparative analysis involving the proposed AIO framework, various large-scale pre-trained ReID models, unimodal generalized methods, cross-modal methods, and multimodal methods, all within the zero-shot setting. As illustrated in Tab. 7 , the existing large-scale pre-trained ReID models, with the exception of PLIP, exhibit unsatisfactory performance in the zero-shot setting. 
Moreover, AIO achieves competitive performance compared to unimodal generalization methods on $\\\\scriptstyle\\\\mathbf{R}\\\\to\\\\mathbf{R}$ retrieval task and outperforms cross-modal methods on all cross-modal retrieval tasks in the zero-shot setting. Notably, existing methods fall short in generalizing to unseen modalities, a limitation overcome by AIO, which adeptly handles all four modalities in cross-modal tasks. The outcomes presented in Tab. 8 unveil the remarkable performance of the proposed AIO framework when incorporating multimodal input. This superior performance stands in stark contrast to methods relying solely on unimodal inputs in cross-modal tasks. Additionally, the results consistently underscore the impact of different modalities, aligning with the conclusions drawn from our preceding ablation studies that AIO is more in favor of Text and RGB modalities than others. Moreover, we also discuss the difference between AIO and UNIReID in detail and the limitation of AIO in the supplemental part.\\n\\n# 5. Conclusion\\nTo the best of our knowledge, this is the first work delving into the uncertain multimodal ReID tasks encompassing all four prevalent modalities, e.g . RGB, IR, Sketch, and Text. We investigate the feasibility of harnessing largescale foundation models for multimodal ReID tasks, presenting a prospective avenue toward zero-shot multimodal ReID in wild conditions. In order to cooperate with foundation models, we introduce an innovative multimodal tokenizer, designed to utilize disparate modality inputs within a shared embedding space, guided by carefully crafted crossmodal heads. Moreover, we introduce synthetic augmentation methods with a progressively learning strategy to alleviate the missing modality problem and mitigate the crossmodal gap between different modalities. 
Extensive experimentation demonstrates the efficacy and competitive performance of the proposed AIO framework across both zeroshot cross-modal and multimodal ReID tasks.  \\n\\nAcknowledgement. This work is partially supported by National Natural Science Foundation of China under Grant (62176188, 62361166629, 62225113, 62306215), and the Special Fund of Hubei Luojia Laboratory (220100015).\\n\\n\\n\\n# All in One Framework for Multimodal Re-identification in the Wild\\nSupplementary Material\\n\\n# Differences between UNIReID and AIO\\nThere are three distinctions between UNIReID and AIO:  \\n\\n1) Divergent Goals: UNIReID and AIO fundamentally differ in their objectives. UNIReID aims to construct a multimodal model for intra-domain retrieval with the descriptive query. At the same time, AIO is explicitly crafted for universal retrieval in real-world scenarios, with four arbitrary modalities or their combinations. Notably, all experiments in this paper follow a zero-shot generalizable setting, which is inapplicable for UNIReID.  \\n\\n2) Different Challenges: UNIReID demands paired multimodal data. In comparison, AIO confronts even more challenging scenarios, involving unpaired heterogeneous multimodal data, with imbalanced and missing modalities. Thus, we introduce synthesized modalities and build connections among imbalanced modalities.  \\n\\n3) Disparate Approach: UNIReID incorporates multiple tasks to accommodate uncertain multimodal input. The number of optimization objectives of UNIReID grows exponentially with the number of modalities, making it hard to extend to more modalities and hindering its scalability. Conversely, AIO designs a flexible solution, treating uncertain multimodal input as variable input lengths. It leverages the adaptable nature of the transformer architecture, simplifying the integration of additional modalities. 
Furthermore, UNIReID employs separate encoders for various modalities, resulting in a lack of synergy between distinctive modalities. Different from UNIReID, AIO leverages a shared foundation model as the backbone to collaboratively learn comprehensive knowledge from heterogeneous multimodal data to complement each other and enhance its generalizablity in real-world scenarios.  \\n\\nAll these differences make AIO more robust and generalizable than UNIReID in real scenarios.',\n",
       " 'title:Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects. chunk_id:9:\\nchunk_text:# 3 Discussion\\nThe rapidly evolving landscape of artificial intelligence (AI) both within the biomedical field and beyond has posed a substantial challenge in composing this survey. Our aim is to provide the reader with a comprehensive overview of the challenges and contemporary approaches to multimodal machine learning in image-based, clinically relevant biomedicine. However, it is essential to acknowledge that our endeavor cannot be fully comprehensive due to the dynamic nature of the field and the sheer volume of emerging literature within the biomedical domain and its periphery. This robust growth has led to a race among industry and research institutions to integrate the latest cutting-edge models into the healthcare sector, with a particular emphasis on the introduction of “large language models” (LLMs). In recent years, there has been an emergence of market-level insights into the future of healthcare and machine learning, as exemplified by the incorporation of machine learning models into wearable devices such as the Apple Watch and Fitbit devices for the detection of atrial fibrillation (Perino et al., 2021 ; Lubitz et al., 2022 ). This begs the question: where does this transformative journey lead us?  \\n\\nHealthcare professionals and physicians already embrace the concept of multimodal cognitive models in their diagnostic and prognostic practices, signaling that such computer models based on multimodal frameworks are likely to endure within the biomedical landscape. However, for these models to be effectively integrated into clinical settings, they must exhibit flexibility that aligns with the clinical environment. If the ultimate goal is to seamlessly incorporate these AI advancements into clinical practice, a fundamental question arises: how can these models be practically implemented on-site? 
Presently, most available software tools for clinicians are intended as auxiliary aids, but healthcare professionals have voiced concerns regarding the potential for increased computational workload, alert fatigue, and the limitations imposed by Electronic Health Record (EHR) interfaces (Ruiter et al., 2015 ; Ancker et al., 2017 ). Therefore, it is paramount to ensure that any additional software introduced into clinical settings serves as an asset rather than a hindrance.  \\n\\nAnother pertinent issue emerging from these discussions pertains to the dynamics between clinical decision support systems (CDSS) and healthcare providers. What occurs when a computer-generated recommendation contradicts a physician’s judgment? This dilemma is not new, as evidenced by a classic case recounted by Evans et al. ( 1998 ), where physicians were granted the choice to either follow or disregard a CDSS for antibiotic prescription. Intriguingly, the group provided with the choice exhibited suboptimal performance compared to both the physician-only and computer-only groups. Consequently, it is unsurprising that some healthcare professionals maintain a cautious approach to computer decision support systems (Adamson and Welch, 2019 ; Silcox et al., 2020 ). Questions arise regarding the accountability of physicians if they ignore a correct computer-generated decision and the responsibility of software developers if a physician follows an erroneous computer-generated recommendation.  \\n\\nA pivotal ingredient notably under-represented in many CDSS models, which could help alleviate discrepancies between computer-generated and human decisions, is the incorporation of uncertainty quantification, grounded calibration, interpretability and explainability. 
These factors have been discussed in previous literature, underscoring the critical role of explainability in ensuring the long-term success of CDSS-related endeavors (Reddy, 2022 ; Khosravi et al., 2022 ; Kwon et al., 2020 ; Abdar et al., 2021 ).  \\n\\nThe domain of multimodal machine learning for medically oriented image-based clinical support has garnered increasing attention in recent years. This interest has been stimulated by advances in computer science architecture and computing hardware, the availability of vast and publicly accessible data, innovative model architectures tailored for limited datasets, and the growing demand for applications in clinical and biomedical contexts. Recent studies have showcased the ability to generate synthetic images in one modality based on another (as outlined in Sect. 2.3 ), align multiple modalities (Sect. 2.4 ), and transfer latent features from one modality to train another (Sect. 2.5 ), among other advancements. These developments offer a promising outlook for a field that is still relatively new. However, it is also imperative to remain vigilant regarding the prevention of data biases and under-representation in ML models to maximize the potential of these technologies.  \\n\\nDespite these promising developments, the field faces significant hurdles, notably the lack of readily available “big data” in the medical domain. For instance, the routine digitization of histopathology slides remains a challenging goal in many healthcare facilities. Data sharing among medical institutions is fraught with challenges around appropriate procedures for the responsible sharing of patient data under institutional, national and international patient privacy regulations.  \\n\\nAdvancing the field will likely entail overcoming these hurdles, ensuring more extensive sharing of de-identified data from research publications and greater participation in establishment of standardized public repositories for data. 
Dissemination of both code and pretrained model weights would also enable greater knowledge-sharing and repeatability. Models that incorporate uncertainty quantification, explainability, and strategies to account for missing data are particularly advantageous. For more guidance on building appropriate multimodal AI models in healthcare, one can refer to the World Health Organization’s new ethics and governance guidelines for large multimodal models (World Health Organization, 2024 ).  \\n\\nIn conclusion, the field of multimodal machine learning in biomedicine has experienced rapid growth in each of its challenge areas of representation, fusion, translation, alignment, and co-learning. Given the recent advancements in deep learning models, escalating interest in multimodality, and the necessity for multimodal applications in healthcare, itislikelythatthefieldwillcontinuetomatureandbroadenits clinical applications. In this ever-evolving intersection of AI and healthcare, the imperative for responsible innovation resonates strongly. The future of multimodal machine learning in the biomedical sphere presents immense potential but also mandates a dedication to ethical principles encompassing data privacy, accountability, and transparent collaboration between human professionals and AI systems. As we navigate this transformative journey, the collective effort, ethical stewardship, and adherence to best practices will ensure the realization of the benefits of AI and multimodal machine learning, making healthcare more efficient, accurate, and accessible, all while safeguarding the well-being of patients and upholding the procedural and ethical standards of clinical practice.  \\n\\nAuthor Contributions E.W. contributed the main writing of the paper. This paper concept was formulated by W.H., T.S.M., O.G., and J.L., A.R., W.H., T.S.M., C.E.K., and A.R. 
contributed ideas and direction for the writing and assisted in the proofreading and selection of the concepts and papers covered.  \\n\\nFunding E.W and A.R are greateful for support from NIH grant R37CA214955-01A1. All authors are grateful to support from the AMIA Biomedical Image Informatics Working group.  \\n\\nData Availability No data outside of those referenced has been used in this survey. Key papers have been summarized in Table 1 .',\n",
       " 'title:MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning. chunk_id:1:\\nchunk_text:# 1. Introduction\\nMultimodal learning has achieved great success on many vision tasks such as classification [ 21 ,33 ,46 ], object detection [ 26 ,45 ,53 ], and segmentation [ 5 ,23 ,41 ]. However, most successful methods assume that the models are trained and tested with the same modality data. In fact, limited by device [ 32 ,39 ], user privacy [ 13 ,25 ], and working condition [ 3 ,29 ], it is often very costly or even infeasible to collect complete modality data during the inference stage. There is thus substantial interest in assisting the incomplete or even single modality inference via the complete modality data during training.  \\n\\n<html><body><table><tr><td>Modality</td><td>Customized</td><td>Unified</td><td>Drop</td></tr><tr><td>RGB</td><td>10.01</td><td>11.75</td><td>-1.65</td></tr><tr><td>Depth</td><td>4.45</td><td>5.87</td><td>-1.42</td></tr><tr><td>IR</td><td>11.65</td><td>16.62</td><td>-4.97</td></tr><tr><td>RGB+Depth</td><td>3.41</td><td>4.61</td><td>-1.2</td></tr><tr><td>RGB+IR</td><td>6.32</td><td>6.68</td><td>-0.36</td></tr><tr><td>Depth+IR</td><td>3.54</td><td>4.95</td><td>-1.41</td></tr><tr><td>RGB+Depth+IR</td><td>1.23</td><td>2.21</td><td>-0.98</td></tr></table></body></html>  \\n\\nA typical solution is to reconstruct the sample or feature of the missing modalities from the available ones [ 10 ,14 ,15 ,20 ,29 ,32 ]. Nevertheless, this needs to build a specific model for each modality from all possible modality combinations and thus has high complexity. Recent studies focus on learning a unified model, instead of a bunch of networks, for different modality combinations. Generally, many such approaches [ 6 ,11 ,12 ,17 ,51 ,52 ] attempt to leverage feature fusion strategies to capture modality-invariant representation so that the model can adapt to all possible modality combinations. 
For example, RFNet [ 11 ] designs the regionaware fusion module to fuse the features of available image modalities.  \\n\\nAlthough the existing unified models are indeed able to increase the efficiency of training and deployment of the multimodal models, their performance is likely to be suboptimal. As shown in Table 1 , the customized models consistently outperform the unified model for different modality combinations. This is because existing unified models usually focus on the modality-invariant features while ignoring the modality-specific information. Note that the complementary modality-specific information of multiple modalities can help refine the inter-class discrimination and improve inference performance [ 2 ,18 ,36 ]. This motivates us to propose the first research question of this paper: Can a unified model consider the modality invariant and specific information simultaneously while maintaining robustness for incomplete modality input?  \\n\\nTo this end, we propose to guide the unified model to learn the comprehensive multimodal information from the teacher model trained with complete modality. This regularizes the target task loss to encourage the unified model to acquire complementary information among different modality combinations multimodal information while preserving the generalization to them. Specifically, we propose a novel margin-aware distillation (MAD) that trains the unified model by guiding it to mimic the inter-sample relation of the teacher model. MAD introduces the classification uncertainty of samples to re-weigh their contribution to the final loss. Since the samples near the class boundary are more likely to be misclassified and have higher classification uncertainty [ 8 ], this encourages the unified model to preserve the inter-class margin refined by the complementary cues and learn the modality-specific information.  
\\n\\nAnother limitation of existing unified approaches is that they struggle to obtain optimal performance for the unbalanced training problem. To be specific, conventional multimodal learning models tend to fit the discriminative modality combination and their performance will degrade significantly when facing weak modality combinations. To solve this issue, existing unified approaches introduce the auxiliary discriminator to enhance the discrimination ability of the unimodal combinations [ 6 ,11 ,51 ]. This utilizes a hypothesis that a single modality is weaker than multiple ones. However, as shown in Table 1 , no matter for the customized model or the unified model, the single Depth modality outperforms the RGB, IR, and their combinations. This indicates the combination with multiple weak modalities may be harder to be optimized than a single strong modality. Moreover, as shown in Table 3 , RGB becomes the strong modality while Depth and IR become the weak modalities. This indicates that the modality importance is not fixed but varies with scenarios. These findings motivate us to propose the second research question: How to effectively optimize the weak modality combination in varying scenarios?  \\n\\nTo this end, we design a regularization network and MAR algorithm to assist the training of the unified network. Specifically, the regularization network generates additional predictions for all inputs. Then MAR mines and calculates prediction loss for the sample from the weak combinations. This forces the unified model to improve its representation ability for the weak combination. In detail, MAR mines the weak combination via the memorization effect [ 1 ,16 ,49 ]that DNNs tend to first memorize simple examples before overfitting hard examples. As shown in Fig. 5 (a), the unified model tends to fit the samples containing Depth modality firstly at the early stage. Therefore, MAR first mines the strong modality via the memorization effect. 
Then it determines the combinations of rest modalities as the weak ones.  \\n\\nFinally, we develop a model and task agnostic framework called MMANet to assist incomplete multimodal learning by combining the proposed MAD and MAR strategies. MMANet can guide the unified model to acquire comprehensive multimodal information and balance the performance of the strong and weak modality combination simultaneously. Extensive comparison and ablation experiments on multimodal classification and segmentation tasks demonstrate the effectiveness of the MMANet.']"
      ]
     },
     "execution_count": 95,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "context"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[[{'id': 454847560048311778,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 0,\n",
       "   'chunk_text': '# MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning\\nShicai Wei Chunbo Luo Yang Luo School of Information and Communication Engineering University of Electronic Science and Technology of China  \\n\\n {c.luo, luoyang }@uestc.edu.cn\\n\\n# Abstract\\nMultimodal learning has shown great potentials in numerous scenes and attracts increasing interest recently. However, it often encounters the problem of missing modality data and thus suffers severe performance degradation in practice. To this end, we propose a general framework called MMANet to assist incomplete multimodal learning. It consists of three components: the deployment network used for inference, the teacher network transferring comprehensive multimodal information to the deployment network, and the regularization network guiding the deployment network to balance weak modality combinations. Specifically, we propose a novel margin-aware distillation (MAD) to assist the information transfer by weighing the sample contribution with the classification uncertainty. This encourages the deployment network to focus on the samples near decision boundaries and acquire the refined inter-class margin. Besides, we design a modalityaware regularization (MAR) algorithm to mine the weak modality combinations and guide the regularization network to calculate prediction loss for them. This forces the deployment network to improve its representation ability for the weak modality combinations adaptively. Finally, extensive experiments on multimodal classification and segmentation tasks demonstrate that our MMANet outperforms the state-of-the-art significantly. Code is available at: https://github.com/shicaiwei123/MMANet',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560099692004,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 1,\n",
       "   'chunk_text': '# 1. Introduction\\nMultimodal learning has achieved great success on many vision tasks such as classification [ 21 ,33 ,46 ], object detection [ 26 ,45 ,53 ], and segmentation [ 5 ,23 ,41 ]. However, most successful methods assume that the models are trained and tested with the same modality data. In fact, limited by device [ 32 ,39 ], user privacy [ 13 ,25 ], and working condition [ 3 ,29 ], it is often very costly or even infeasible to collect complete modality data during the inference stage. There is thus substantial interest in assisting the incomplete or even single modality inference via the complete modality data during training.  \\n\\n<html><body><table><tr><td>Modality</td><td>Customized</td><td>Unified</td><td>Drop</td></tr><tr><td>RGB</td><td>10.01</td><td>11.75</td><td>-1.65</td></tr><tr><td>Depth</td><td>4.45</td><td>5.87</td><td>-1.42</td></tr><tr><td>IR</td><td>11.65</td><td>16.62</td><td>-4.97</td></tr><tr><td>RGB+Depth</td><td>3.41</td><td>4.61</td><td>-1.2</td></tr><tr><td>RGB+IR</td><td>6.32</td><td>6.68</td><td>-0.36</td></tr><tr><td>Depth+IR</td><td>3.54</td><td>4.95</td><td>-1.41</td></tr><tr><td>RGB+Depth+IR</td><td>1.23</td><td>2.21</td><td>-0.98</td></tr></table></body></html>  \\n\\nA typical solution is to reconstruct the sample or feature of the missing modalities from the available ones [ 10 ,14 ,15 ,20 ,29 ,32 ]. Nevertheless, this needs to build a specific model for each modality from all possible modality combinations and thus has high complexity. Recent studies focus on learning a unified model, instead of a bunch of networks, for different modality combinations. Generally, many such approaches [ 6 ,11 ,12 ,17 ,51 ,52 ] attempt to leverage feature fusion strategies to capture modality-invariant representation so that the model can adapt to all possible modality combinations. For example, RFNet [ 11 ] designs the regionaware fusion module to fuse the features of available image modalities.  
\\n\\nAlthough the existing unified models are indeed able to increase the efficiency of training and deployment of the multimodal models, their performance is likely to be suboptimal. As shown in Table 1 , the customized models consistently outperform the unified model for different modality combinations. This is because existing unified models usually focus on the modality-invariant features while ignoring the modality-specific information. Note that the complementary modality-specific information of multiple modalities can help refine the inter-class discrimination and improve inference performance [ 2 ,18 ,36 ]. This motivates us to propose the first research question of this paper: Can a unified model consider the modality invariant and specific information simultaneously while maintaining robustness for incomplete modality input?  \\n\\nTo this end, we propose to guide the unified model to learn the comprehensive multimodal information from the teacher model trained with complete modality. This regularizes the target task loss to encourage the unified model to acquire complementary information among different modality combinations multimodal information while preserving the generalization to them. Specifically, we propose a novel margin-aware distillation (MAD) that trains the unified model by guiding it to mimic the inter-sample relation of the teacher model. MAD introduces the classification uncertainty of samples to re-weigh their contribution to the final loss. Since the samples near the class boundary are more likely to be misclassified and have higher classification uncertainty [ 8 ], this encourages the unified model to preserve the inter-class margin refined by the complementary cues and learn the modality-specific information.  \\n\\nAnother limitation of existing unified approaches is that they struggle to obtain optimal performance for the unbalanced training problem. 
To be specific, conventional multimodal learning models tend to fit the discriminative modality combination and their performance will degrade significantly when facing weak modality combinations. To solve this issue, existing unified approaches introduce the auxiliary discriminator to enhance the discrimination ability of the unimodal combinations [ 6 ,11 ,51 ]. This utilizes a hypothesis that a single modality is weaker than multiple ones. However, as shown in Table 1 , no matter for the customized model or the unified model, the single Depth modality outperforms the RGB, IR, and their combinations. This indicates the combination with multiple weak modalities may be harder to be optimized than a single strong modality. Moreover, as shown in Table 3 , RGB becomes the strong modality while Depth and IR become the weak modalities. This indicates that the modality importance is not fixed but varies with scenarios. These findings motivate us to propose the second research question: How to effectively optimize the weak modality combination in varying scenarios?  \\n\\nTo this end, we design a regularization network and MAR algorithm to assist the training of the unified network. Specifically, the regularization network generates additional predictions for all inputs. Then MAR mines and calculates prediction loss for the sample from the weak combinations. This forces the unified model to improve its representation ability for the weak combination. In detail, MAR mines the weak combination via the memorization effect [ 1 ,16 ,49 ]that DNNs tend to first memorize simple examples before overfitting hard examples. As shown in Fig. 5 (a), the unified model tends to fit the samples containing Depth modality firstly at the early stage. Therefore, MAR first mines the strong modality via the memorization effect. Then it determines the combinations of rest modalities as the weak ones.  
\\n\\nFinally, we develop a model and task agnostic framework called MMANet to assist incomplete multimodal learning by combining the proposed MAD and MAR strategies. MMANet can guide the unified model to acquire comprehensive multimodal information and balance the performance of the strong and weak modality combination simultaneously. Extensive comparison and ablation experiments on multimodal classification and segmentation tasks demonstrate the effectiveness of the MMANet.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560159460838,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 2,\n",
       "   'chunk_text': '# 2. Related work\\n\\n# 2.1. Multimodal Learning for Missing Modalities\\nMost existing multimodal learning methods assume that all instances consist of full modalities. However, this assumption does not always hold in real-world applications due to the device [ 32 ,39 ], user privacy [ 13 ,25 ], and working condition [ 3 ,29 ]. Recently, many incomplete multimodal learning methods have been proposed and can be roughly categorized into two types: customized methods and unified methods. Customized methods aim to train a specific model to recover the missing modality in each incomplete modality combination. According to the recovering target, the customized methods can be further divided into sample-based methods and representation-based methods. Sample-based methods focus on imputing the missing modality at the input space with generative adversarial networks [ 4 ,27 ,32 ,37 ]. Due to the complexity of sample reconstruction, it is usually unstable and may introduce noise to harm the primary task at hand [ 34 ]. Thus the representationbased methods are proposed to reconstruct the sample representation via the knowledge distillation [ 3 ,14 ,20 ,29 ] or matrix completion [ 30 ,35 ]. Although promising results are obtained, these methods have to train and deploy a specific model for each subset of missing modalities, which has high complexity in practical applications.  \\n\\nThe unified methods aim to train one model to deal with different incomplete modality combinations by extracting the modality-invariant features. For example, HeMIS [ 17 ]learns an embedding of multimodal information by computing statistics (i.e., mean and variance) from any number of available modalities. Furthermore, Chen et al . introduce the feature disentanglement to cancel out the modalityspecific information. 
Besides, more recent methods, such as LCR [ 52 ] and RFNet [ 11 ] focus on extracting the modalityinvariant representation via different attention mechanisms. Moreover, mmFormer [ 51 ] introduces the transformer block to model the global semantic information for the modalityinvariant embedding. While these methods achieve promising results, they only consider the modality-invariant information while ignoring the modality-specific information. As a result, they usually perform much worse than the customized methods, especially when more than one modality is missing [ 48 ].\\n\\n# 2.2. Knowledge Distillation\\nKnowledge distillation aims to transfer knowledge from a strong teacher to a weaker student network to facilitate supervised learning. Generally, the distillation method can be divided into three types: response-based distillation that matches the softened logits of teachers and students [ 19 ], the representation-based distillation that matches the feature maps [ 24 ,28 ,40 ], and the relation-based distillation that matches the sample relations. [ 38 ,47 ].  \\n\\nWhile originating from the resource-efficient deep learning, knowledge distillation has found wider applications in such areas as incomplete multimodal learning. Here, it is used to transfer the privileged modality information that can only be accessed during the training stage from the teacher to the student [ 3 ,29 ]. Since the input of the teacher and student network is different in incomplete multimodal learning, transferring knowledge by representation-based methods may lead to overfitting [ 15 ]. Recent methods focus on transferring the privileged modality information by the relation-based methods [ 7 ,22 ,48 ]. However, these prior arts usually consider different instances equally and ignore their specificity, which would lead to sub-optimal performance.\\n\\n# 3. Method\\n\\n# 3.1. 
MMANet\\nIn this section, we introduce a general framework called MMANet to address the challenge of incomplete multimodal learning. As shown in Fig. 1 , it consists of three parts: deployment network, teacher network, and regularization network. Specifically, the deployment network is the inference network. To make it robust to the modality incompleteness, MMANet introduces the Bernoulli indicator $\\\\Delta\\\\;=\\\\;\\\\{\\\\delta_{1}...\\\\delta_{m}\\\\}$ after modality encoders and conducts modality dropout during the training stage by randomly setting some components of $\\\\Delta$ as 0. For missing modalities, the corresponding encoded feature maps will be replaced by a zero matrix. Besides, MMANet introduces the teacher network that is pre-trained with complete multimodal data to transfer the comprehensive multimodal knowledge to the deployment network via the MAD. This helps the deployment network acquire the modality-invariant and specific features simultaneously. Finally, MMANet guides the deployment network to train together with the regularization network that produces additional predictions for the weak modality combination via the MAR. This alleviates the overfitting for strong modality combinations. The total loss to guide the training of the deployment network is defined as follows,  \\n\\n$$\\nL_{t o t a l}=L_{T L}+\\\\alpha L_{M A D}+\\\\beta L_{M A R}\\n$$  \\n\\nwhere $\\\\alpha$ and $\\\\beta$ are the hyper-parameters. $L_{L T}$ is task learning loss, which is determined by the primary task at hand. For example, $L_{L T}$ may be the cross entropy loss when the primary task is classification. $L_{M A D}$ and $L_{M A R}$ are the loss of MAD and MAR respectively.  \\n\\nBesides, the other nations used in MMANet are defined as follows. Given a mini-batch multimodal input $\\\\boldsymbol{x}=\\\\{x_{1},...,x_{m}\\\\}$ ,$x_{m}\\\\in R^{b}$ denot e data of $m_{t h}$ modality. bis the batch size. 
$E_{m}^{t}$ and $E_{m}^{d}$ denote the encoders for the $m_{t h}$ modality in the teacher and deployment networks, respectively. $F^{t}$ and $F^{d}$ denote the fusion module used in the teacher and deployment networks, respectivel and z$\\\\overline{{\\\\boldsymbol{z}^{d}\\\\mathrm{~\\\\,~}\\\\in~\\\\,~}}\\\\boldsymbol{R}^{b^{d}\\\\times c^{d}\\\\times h^{d}\\\\times w^{d}}$ $\\\\Delta^{d}\\\\in R^{b\\\\times m}$ ∈ector of denote the fused features of $\\\\Delta$ .$z^{t}\\\\in R^{b^{t}\\\\times c^{t}\\\\times h^{t^{\\\\star}}\\\\times w^{t}}$ the teacher and deployment networks, respectively. Here, where $b$ is the batch size, $c$ is the number of output channels, and $h$ and $w$ are spatial dimensions. $P^{t},\\\\,P^{r}$ , and $P^{d}$ denote the task predictor of the teacher, regularization, and deployment networks, respectively. $\\\\boldsymbol{y}^{t},\\\\boldsymbol{y}^{r}$ , and $y^{d}$ denote the $R^{b\\\\times k}$ prediction matrix of the teacher, regularization, and deployment networks, respectively. Here, $k$ is the class number.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560218705384,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 3,\n",
       "   'chunk_text': '# 3.2. MAD\\nThis section introduces the proposed MAD strategy for transferring the comprehensive multimodal information from the teacher network to the deployment network. As shown in Fig. 1 , MAD is conducted between the $z^{t}$ and $z^{d}$ .$z^{d}$ of a simple is varying due to the random modality dropout. In contrast, the sample semantic is invariant. Thus, MAD proposes to transfer the teacher’s knowledge via relation consistency instead of feature consistency. This helps avoid overfitting and harming the representation ability of deployment networks. Moreover, MAD proposes to measure the class boundaries and guide the unified model to pay more attention to the samples near them. This can encourage the development network to inherit the refined inter-class margin from the teacher network. Nevertheless, boundaries are usually difficult to detect due to their irregularity. To solve this issue, MAD introduces the classification uncertainty of each sample to re-weight its contribution for the total loss. Since the samples near the class boundaries are more likely to be misclassified and have higher classification uncertainty, this can realize attention to them.  \\n\\n  \\nFigure 1. Overview of the proposed MMANet. It consists of three parts: the deployment network used for final inference, the teacher network transferring comprehensive multimodal knowledge to the deployment network, and the regularization network guiding the deployment network to balance weak modality combinations.  \\n\\n  \\nFigure 2. The illustration of the proposed MAD.  \\n\\nThe overview of the MAD is shown in Fig. 2 . It takes $z^{t},\\\\;z^{d}$ , and $y^{t}$ as the input and consists of three steps: (a) calculating the relation discrepancy vector $g^{t d}\\\\ \\\\in\\\\ \\\\bar{R}^{b}$ calculating the classification un and (c) calculating the total loss $L_{M A D}$ ty vector for MAD. 
$\\\\pi^{t}\\\\ \\\\in\\\\ R^{b}$ ∈  \\n\\n(a) MAD calculates $z^{t}$ and $g^{t d}$ $z^{d}$ from into $z^{t}$ $z^{t^{\\\\prime}}\\\\,\\\\in\\\\,R^{b^{t}\\\\times c^{t^{\\\\star}}\\\\ast h^{t}*w^{t}}$ and $z^{d}$ . Specifically, and $z^{d^{\\\\prime}}\\\\;\\\\in\\\\;R^{b^{d}\\\\times c^{d}*\\\\bar{h^{d}}*w^{d}}$ matrix ∈$\\\\boldsymbol{r}^{t}\\\\,\\\\in\\\\,R^{b\\\\times b}$ ∈and .r$\\\\boldsymbol{r}^{d}\\\\,\\\\in\\\\,R^{b\\\\times b}$ ∈calculates the relation vame relation function $\\\\mathbf{R}(\\\\mathbf{u},\\\\mathbf{v})$ , respectively. And the $r^{t}(i,j)$ that denotes the relation between $i_{t h}$ and $j_{t h}$ sample representations of the teacher network can be expressed as follows,  \\n\\n$$\\nr^{t}(i,j)=R(z^{t^{\\\\prime}}(i,:),z^{t^{\\\\prime}}(j,:))\\n$$  \\n\\nBesides, the $\\\\boldsymbol{r}^{d}(i,j)$ that denotes the relation between $i_{t h}$ and $j_{t h}$ sample representations of the deployment network  \\n\\ncan be expressed as follows,  \\n\\n$$\\nr^{d}(i,j)=R(z^{d^{\\\\prime}}(i,:),z^{d^{\\\\prime}}(j,:))\\n$$  \\n\\nTheoretically, $R(u,v)$ can be any metric for measuring the vector distance, such as the Euclidean distance and the cosine distance. Because the dimension of the feature vectors of the teacher and the deployment networks could be very high, to eliminate the curse of dimensionality, we choose cosine distance as the $R(u,v)$ ,  \\n\\n$$\\nR(u,v)=\\\\frac{u^{T}v}{\\\\|u\\\\|_{2}\\\\|v\\\\|_{2}}\\n$$  \\n\\nFurthermore, MAD calculates the discrepancy matrix between $r^{t}$ and $r^{d}$ and sum each row to get $g^{t d}$ .  \\n\\n$$\\ng^{t d}=\\\\sum_{i=1}^{b}(r^{t}-r^{d})_{i}\\n$$  \\n\\nHere, $g^{t d}(i)$ denotes the relation gap between the teacher and deployment networks from the $i_{t h}$ sample to other samples in the same mini-batch.  \\n\\n(b) MAD calculates $\\\\pi^{t}$ from $y^{t}$ . In detail, it takes the information entropy of the logit output of each sample as its classification uncertainty. 
And the classification uncertainty for $i_{t h}$ sample, $\\\\pi^{t}(i)$ , can be expressed as follows,  \\n\\n$$\\n\\\\left\\\\{\\\\begin{array}{l}{{\\\\pi^{t}(i)=H(y^{t}(i,:))}}\\\\\\\\ {{H(x)=-\\\\sigma(x)*l o g(\\\\sigma(x))}}\\\\end{array}\\\\right.\\n$$  \\n\\nwhere $\\\\sigma(.)$ is the softmax function for normalization. $H(x)$ is the information entropy of $x$ . A sample that has a higher classification uncertainty is usually closer to the decision boundaries, since it is more likely to be misclassified. Thus, $\\\\pi^{t}(i),i~\\\\in~[1,b]$ can also denote the margin from the $i_{t h}$ sample representation to the decision boundary.  \\n\\n(c) Finally, MAD takes $\\\\pi^{t}(i)$ as the weight for the corresponding component $g^{t d}(i)$ to calculate $L_{M A D}$ ,  \\n\\n$$\\nL_{M A D}=\\\\sum_{i=1}^{b}\\\\sigma(\\\\pi^{t})(i)*g^{t d}(i)\\n$$  \\n\\nThis encourages the deployment network to focus on the samples near the decision boundaries and preserve the interclass margin refined by the comprehensive multimodal information from the teacher network.\\n\\n# 3.3. MAR\\nThis section introduces the MAR algorithm that forces the deployment network to improve its discriminating ability for weak modality combinations adaptively. As shown in Fig. 1 , MAR takes the $y^{r}$ ,$y^{d}$ and $\\\\Delta^{d}$ as as the input to calculate the $L_{M A R}$ . Specifically, MAR first proposes a contrastive ranking strategy to mine the weak modality combinations. Compared to simply taking the combination with a single modality as the weak one, this further considers the combination with multiple modalities and can get more accurate mining results. Then, MAR calculates the prediction loss for the weak modality combinations, guiding the deployment network to pay more attention to them.  \\n\\nThe overview of MAR is shown in Fig. 3 . It consists of two steps: (a) w n$E\\\\,\\\\leq\\\\,N$ , min weak modality combination set Ω, and (b) when $E\\\\,>\\\\,N$ , calculating $L_{M A R}$ . 
Here $E$ is the current training epoch, and $N$ is the number of warm-up epochs.  \\n\\n(a) MAR calculates $\\\\Omega$ from $y^{d}$ using contrastive rankopo to calculate the predicted output $Y^{O}\\\\in$ $R^{(m+1)\\\\times n\\\\times k}$ of $\\\\Delta_{i}$ ,$i\\\\,\\\\in\\\\,[0,m]$ , on the train set after each training epoch.  \\n\\n$$\\nY^{O}(i,:,:)=y^{d}(\\\\Delta_{i})\\n$$  \\n\\n$n$ is sample number. $\\\\Delta_{i}$ means the $i_{t h}$ component of $\\\\Delta$ is 0. $\\\\Delta_{0}$ means none component of $\\\\Delta$ are 0, which must contain the strong modality. Since the deployment network tends to first memorize the ith strong modality, $\\\\Delta_{w},w\\\\in$ $[1,m]$ that makes $Y^{O}(w,:,:)$ has the biggest distance with $Y^{O}(0,:,:)$ is the hard combination that does not contain the strong modality. And the element of $\\\\Omega$ can be determined as $\\\\Delta_{w}$ and the $\\\\Delta$ consists of the modalities in it.  \\n\\nSpecifically, to make $\\\\Delta_{w}$ robust for the randomness of neural network learning, MAR introduces two innovations. Firstly, MAR calculates the prediction discrepancy from the prediction distribution $Y^{d}\\\\in R^{(m+1)\\\\times k}$ instead of $Y^{O}$ ,  \\n\\n$$\\n\\\\left\\\\{\\\\begin{array}{l l}{Y^{d}(i,j)=\\\\sum(Y^{D}(i,:)==j)}\\\\\\\\ {Y^{D}=\\\\arg\\\\operatorname*{max}(Y^{O},d i m=2)}\\\\end{array}\\\\right.\\n$$  \\n\\n  \\nFigure 3. The illustration of the proposed MAR.  \\n\\nwhere $j\\\\in[0,k-1]$ . Compared with $Y^{O}$ ,$Y^{d}$ needs only class-wise but not sample-wise consistency. Then the vector discrepancy vector $g^{d}\\\\in R^{m}$ is defined as follows,  \\n\\n$$\\ng^{d}(i)=K L(l o g(\\\\sigma(Y^{d}(i))),\\\\sigma(Y^{d}(0)))\\n$$  \\n\\nwhere $\\\\mathbf{KL}(,)$ means the KL divergence, $i\\\\in[1,m]$ .  
\\n\\nSecondly, MAR introduces a memory bank $M^{d}\\\\;\\\\;\\\\in\\\\;\\\\;$ ∈$R^{N\\\\times m}$ to save the $g^{d}$ among the warm-up epochs and performs average filtering to obtain ${\\\\overline{{g}}}^{d}$ ,  \\n\\n$$\\n\\\\overline{{\\\\boldsymbol{g}}}^{d}=\\\\sum_{i=1}^{N}\\\\frac{1}{N}(\\\\boldsymbol{M}^{d})_{i}\\n$$  \\n\\nwhere $(M^{d})_{i}$ is the $g^{d}$ in the $i_{t h}$ epoch. And $\\\\Delta_{w}$ can be determined as $\\\\Delta_{i}$ where $i=a r g m a x(\\\\overline{{g}}^{d})$ .  \\n\\n(b) MAR calculates $L_{M A R}$ from $y^{r}$ ,$\\\\Delta^{d}$ and $\\\\Omega$ . In detail, MAR st cal ulates the weak combination mask $M^{r}\\\\in R^{b}$ from $\\\\Delta^{d}$ and Ω,  \\n\\n$$\\n\\\\left\\\\{\\\\begin{array}{l l}{M^{r}(i)=F A L S E\\\\quad i f\\\\Delta^{d}(i)\\\\notin\\\\Omega}\\\\\\\\ {M^{r}(i)=T R U E\\\\quad i f\\\\Delta^{d}(i)\\\\in\\\\Omega}\\\\end{array}\\\\right.\\n$$  \\n\\nwhere $i\\\\in[0,b-1]$ ,$\\\\Delta^{d}(i)$ is the $\\\\Delta$ for the $i_{t h}$ sample in this mini-batch. Then, the $L_{M A R}$ is defined as follows,  \\n\\n$$\\nL_{M A R}=L_{T L}(y^{r}[M^{r}],l[M^{r}])\\n$$  \\n\\nwhere $l$ is the groundtruth vector for $y^{r}.\\\\quad[.]$ denotes the index operator.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560270347754,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 4,\n",
       "   'chunk_text': '# 4. Experiments\\nWe conduct experiments on multimodal classification and segmentation tasks to evaluate the proposed MMANet. In the following, we first compare the MMANet architecture with the state-of-the-art on these two tasks. Then, we ablate the MAD and MAR strategies of MMANet.\\n\\n# 4.1. Performance and Comparison on Multimodal Classification\\nDatasets : We take the face anti-spoofing task as the example of the multimodal classification and conduct experiments on the CASIA-SURF [ 50 ] and CeFA [ 31 ] datasets. Both of them consist of the RGB, Depth, and IR modalities. For CASIA-SURF, we follow the intra-testing protocol suggested by the authors and divide it into train, validation, and test sets with $29\\\\mathbf{k}$ , 1k, and 57k samples, respectively. For CeFA, we follow the cross-ethnicity and cross-attack protocol suggested by the authors and divide it into train, validation, and test sets with $35\\\\mathrm{k}$ , 18k, and $54\\\\mathrm{k}$ samples, respectively. Here we report the results on the test set using the metric of Average Classification Error Rate (ACER) [ 50 ].  \\n\\nTable 2. Performance on the multimodal classification task with CASIA-SURF. $\\\\downarrow$ means that the lower the value, the better the performance.   
\\n\\n\\n<html><body><table><tr><td colspan=\"3\">Modalities</td><td colspan=\"7\">ACER(↓)</td></tr><tr><td rowspan=\"2\">RGB</td><td rowspan=\"2\">Depth</td><td rowspan=\"2\">IR</td><td>Customized</td><td colspan=\"5\">Unified</td><td></td></tr><tr><td>SF</td><td>SF-MD</td><td>HeMIS</td><td>LCR</td><td>RFNet</td><td>MMFormer</td><td>MMANet</td></tr><tr><td></td><td>O</td><td>O</td><td>10.01</td><td>11.75</td><td>14.36</td><td>13.44</td><td>12.43</td><td>11.15</td><td>8.57</td></tr><tr><td>O</td><td></td><td>O</td><td>4.45</td><td>5.87</td><td>4.70</td><td>4.40</td><td>4.17</td><td>4.67</td><td>2.27</td></tr><tr><td>O</td><td>O</td><td></td><td>11.65</td><td>16.62</td><td>16.21</td><td>15.26</td><td>14.69</td><td>13.99</td><td>10.04</td></tr><tr><td></td><td></td><td>O</td><td>3.41</td><td>4.61</td><td>3.23</td><td>3.32</td><td>2.23</td><td>1.93</td><td>1.61</td></tr><tr><td></td><td>O</td><td></td><td>6.32</td><td>6.68</td><td>6.27</td><td>5.16</td><td>4.27</td><td>4.77</td><td>3.01</td></tr><tr><td>O</td><td></td><td></td><td>3.54</td><td>4.95</td><td>3.68</td><td>3.53</td><td>3.22</td><td>3.10</td><td>1.18</td></tr><tr><td></td><td></td><td></td><td>1.23</td><td>2.21</td><td>1.97</td><td>1.88</td><td>1.18</td><td>1.94</td><td>0.87</td></tr><tr><td colspan=\"2\">Average</td><td></td><td>5.80</td><td>7.52</td><td>7.18</td><td>6.71</td><td>6.02</td><td>5.93</td><td>3.94</td></tr></table></body></html>  \\n\\nTable 3. Performance on the multimodal classification task with the CeFA dataset.   
\\n\\n\\n<html><body><table><tr><td colspan=\"2\">Modalities</td><td colspan=\"3\">ACER(↓)</td></tr><tr><td rowspan=\"3\">RGB Depth</td><td rowspan=\"3\">IR</td><td>Customized</td><td colspan=\"2\">Unified</td></tr><tr><td>SF</td><td>MMFormer</td><td>MMANet</td></tr><tr><td></td><td>28.51</td><td></td></tr><tr><td></td><td>O</td><td>27.44</td><td></td><td>27.15</td></tr><tr><td>O</td><td></td><td>33.75</td><td>33.58</td><td>32.50</td></tr><tr><td>O</td><td>O</td><td>36.17</td><td>39.56</td><td>35.62</td></tr><tr><td></td><td>O</td><td>35.62</td><td>29.47</td><td>22.87</td></tr><tr><td></td><td>O</td><td>31.62</td><td>27.66</td><td>23.27</td></tr><tr><td>O</td><td></td><td>36.62</td><td>32.17</td><td>30.45</td></tr><tr><td></td><td></td><td>24.15</td><td>30.72</td><td>23.68</td></tr><tr><td colspan=\"2\">Average</td><td>32.20</td><td>31.52</td><td>27.94</td></tr></table></body></html>  \\n\\nImplementation : We use random flipping, rotation, and cropping for data augmentation. All models are optimized by an SGD for 100 epochs with a mini-batch 64. The learning rate is initialized to 0.001 with 5 epochs of linear warmup and divided by 10 at 16, 33, and 50 epochs. Weight decay and momentum are set to 0.0005 and 0.9, respectively.  \\n\\nThe hyper-parameters of comparison methods use the suggested ones in the original articles. The $(\\\\alpha,\\\\beta)$ for MMANet is set as (30 ,0 .5) and (30 ,0 .5) for CASIA-SURF and CeFA, respectively. The warm-up epoch $N$ is set as 5.  \\n\\nComparison : Here we compare MMANet with two different unified methods for incomplete multimodal learning. One is an early method that only focuses on extracting modality-invariant features, such as HeMIS [ 17 ] and LCR [ 52 ]. Another is the enhanced method that further considers improving the discrimination ability for single-modal combinations, such as RFNet [ 11 ], and mmFormer [ 51 ].  \\n\\nBesides, we introduce two baseline methods, SF and SF-MD. 
SF [ 50 ] is the benchmark method of the CASIASURF, which is a customized method that trains the model for each modality combination. SF-MD is the variant of SF by simply adding the Bernoulli indicator after its modality encoder. This enables SF-MD to become a unified model that trains a single model for all modality combinations.  \\n\\nFinally, for a fair comparison, we follow the basic implementation of SF for all the comparison methods. Specifically, we unify the modality encoders of HeMIS, LCR, RFNet, and mmFormer as the ResNet18 used in SF. Besides, we set the SF model trained with complete multimodal data as the teacher network and the SF-MD model as the development network.  \\n\\nResults :Table 2 and Table 3 show the comparison results with the state-of-the-art methods on the CASISSURF and CeFA datasets, respectively. Compared with the second-best unified method, i.e. mmFormer, MMANet decreases the average ACER by $1.99\\\\%$ and $3.58\\\\%$ on the CASIS-SURF and CeFA, respectively. Besides, we can see that MMANet achieves the best performance on both datasets for all the nine modality combinations for CASIASURF. This shows the superiority of our method on the incomplete multimodal classification task. More importantly, MMANet even outperforms the customized baseline method, i.e. SF, for all the modality combinations on the CASIA-SURF and CeFA, decreasing the average ACER by $1.86\\\\%$ and $4.26\\\\%$ . This demonstrates the effectiveness of the proposed MAD and MAR for the incomplete multimodal classification task.  \\n\\nTable 4. Performance on the multimodal segmentation task with NYUv2. $\\\\uparrow$ means that the higher the value, the better the performance.   
\\n\\n\\n<html><body><table><tr><td colspan=\"2\">Modality</td><td colspan=\"7\">mIOU(↑)</td></tr><tr><td rowspan=\"2\">RGB</td><td rowspan=\"2\">Depth</td><td colspan=\"6\">Unified</td></tr><tr><td>Customized ESANet</td><td>ESANet-MD</td><td>HeMIS</td><td>LCR</td><td>RFNet</td><td>mmFormer</td><td>MMANet</td></tr><tr><td></td><td></td><td>44.22</td><td>41.34</td><td>33.23</td><td>41.91</td><td>42.89</td><td>43.22</td><td>44.93</td></tr><tr><td></td><td></td><td>40.55</td><td>39.76</td><td>31.23</td><td>39.88</td><td>40.76</td><td>41.12</td><td>42.75</td></tr><tr><td></td><td></td><td>49.18</td><td>47.23</td><td>37.77</td><td>47.46</td><td>48.13</td><td>48.45</td><td>49.62</td></tr><tr><td colspan=\"2\">Average</td><td>44.65</td><td>42.77</td><td>34.07</td><td>43.08</td><td>43.92</td><td>44.26</td><td>45.58</td></tr></table></body></html>  \\n\\nTable 5. Performance on the multimodal segmentation task with the Cityscapes dataset.   \\n\\n\\n<html><body><table><tr><td colspan=\"2\">Modality</td><td colspan=\"2\">mIOU(↑)</td></tr><tr><td rowspan=\"2\">RGB Depth</td><td>Customized</td><td colspan=\"2\">Unified</td></tr><tr><td>ESANet</td><td>mmFormer</td><td>MMANet</td></tr><tr><td></td><td>77.60</td><td>76.62</td><td>77.61</td></tr><tr><td></td><td>59.11</td><td>58.53</td><td>60.12</td></tr><tr><td></td><td>78.62 71.77</td><td>78.01</td><td>78.89</td></tr><tr><td colspan=\"2\">Average</td><td>71.05</td><td>72.20</td></tr></table></body></html>\\n\\n# 4.2. Performance and Comparison on Multimodal Segmentation\\nDatasets : We take the semantic segmentation task as the example of multimodal segmentation and conduct experiments on the NYUv2 [ 43 ] and Cityscapes [ 9 ] datasets. Both of them consist of the RGB and Depth modalities. Specifically, NYUv2 contains 1,449 indoor RGB-D images, of which 795 are used for training and 654 for testing. We used the common 40-class label setting. Cityscapes is a large-scale outdoor RGB-D dataset for urban scene understanding. 
It contains 5,000 finely annotated samples with a resolution of $2048\\\\!\\\\times\\\\!1024$ , of which 2,975 for training, 500 for validation, and 1,525 for testing. Cityscapes also provides 20k coarsely annotated images, which we did not use for training . For both datasets, we report the results on the validation set using the metric of mean IOU (mIOU).  \\n\\nImplementation : We use random scaling, cropping, color jittering, and flipping for data augmentation. All models are optimized by Adam for 300 epochs with a mini-batch 8. The learning rate is initialized with 1e-2 and adapted by the PyTorch’s one-cycle scheduler [ 44 ].  \\n\\nThe hyper-parameters of the comparison methods use the suggested ones in their article.The hyper-parameter $(\\\\alpha,\\\\beta)$ for MMANet is set as $(4,0.2)$ and (10 ,0 .1) for the NYUv2 and Cityscapes datasets, respectively. The warmup epoch $N$ is set as 10.  \\n\\nComparison :We also compare MMANet with the HeMIS [ 17 ], LCR [ 52 ], RFNet [ 11 ], and mmFormer [ 51 ]. Here, we set ESANet and ESANnet-MD as the baseline.  \\n\\nESANet [ 42 ] is an efficient and robust model for RGBD segmentation, which trains the model for each modality combination. ESANet-MD is the variant of ESANet by simply adding the Bernoulli indicator after its modality encoder. ESANet-MD trains only a single model for all modality combinations. Finally, for a fair comparison, we unify the modality encoder of HeMIS, LCR, RFNet, and mmFormer as the ResNet50 with NBt1 used in ESANet. Besides, we set the ESANet model trained with complete multimodal data as the teacher network and the ESANetMD model as the development network.  \\n\\nResults : Table 4 and Table 5 list the comparison results on the NYUv2 and Cityscapes datasets, respectively. From these results, we can see that MMANet achieves the best performance on both datasets for all the modality combinations. 
In particular, it outperforms the second-best method, mmFormer, by $1.32\\\\%$ and $1.05\\\\%$ in the NYUv2 and Cityscapes datasets, respectively. This demonstrates the effectiveness of the MMANet on the multimodal segmentation task. Moreover, MMANet improves the average performance of ESANet-MD by $2.81\\\\%$ in the NYUv2 dataset and even outperforms the customized baseline, ESANet, by $0.97\\\\%$ and $0.43\\\\%$ in NYUv2 and Cityscapes datasets. This shows the effectiveness of the proposed MAD and MAR on the incomplete multimodal segmentation task.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560329330156,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 5,\n",
       "   'chunk_text': '# 5. Ablation Study\\nThis section will study the effectiveness of MAD and MAR and conduct extensive ablation experiments on four datasets. Limited by page, we only present the results of the CASIA-SURF dataset and other results can be seen in the supplementary material.\\n\\n# 5.1. The effectiveness of MAD\\nTo study the effect of MAD, we conduct experiments to compare the performance of the vanilla SF-MD and its variant with SP and MAD. Here, SP is the degradation method of MAD that transfers knowledge by directly matching the cosine distance of the sample representations between teacher and deployment networks. The results are shown in Table 6 . We can see that the variant of SF-MD consistently outperforms the vanilla SF-MD in all the modality combination and improve its performance by $1.6\\\\%$ and $2.93\\\\%$ in average. This demonstrates the effectiveness of transferring comprehensive multimodal information from the teacher network to the deployment network. Furthermore, the proposed MAD outperforms SP by $1.33\\\\%$ , which demonstrates the validity of re-weighing sample loss via the classification uncertainty. This is because the classification uncertainty re-weighing can encourage the deployment to focus on the hard samples and thus acquire a more separable inter-class margin than the conventional SP (see Fig. 4 ).  \\n\\n  \\nFigure 4. The prediction distribution of the SF-MD model assisted by SP and MAD on CASIA-SURF dataset. X-axis represents the normalized logit output and $\\\\scriptstyle\\\\mathbf{X}=0.5$ is the classification boundary. orange and blue dots denotes two different classes.  \\n\\nTable 7. Ablation result of MAR on the CASIA-SURF dataset.   
\\n\\n\\n<html><body><table><tr><td>RGB</td><td>Depth IR</td><td>SF-MAD +SR</td><td>+MAR</td></tr><tr><td></td><td>O O</td><td>10.36 9.17</td><td>8.57</td></tr><tr><td rowspan=\"3\"></td><td>O</td><td>2.54 1.89</td><td>2.27</td></tr><tr><td>O</td><td>11.67</td><td>10.21 10.04</td></tr><tr><td>O</td><td>1.23</td><td>1.66 1.61</td></tr><tr><td></td><td>O</td><td>4.09</td><td>4.37 3.01</td></tr><tr><td rowspan=\"2\"></td><td></td><td>1.44</td><td>2.12 1.18</td></tr><tr><td></td><td>0.77</td><td>0.92 0.87</td></tr><tr><td colspan=\"2\">Average</td><td>4.57</td><td>4.33 3.94</td></tr></table></body></html>  \\n\\n  \\nFigure 5. (a) The learning process of different modality combinations on the CASIA-SURF dataset during the warm-up stage. (b) Th corresponding $g^{d}\\\\in R^{3}$ for (dashed line) and its average result $\\\\overline{{g}}^{d}$ (solid line) for the warm-up stage.  \\n\\n‘RGB’, ‘IR’ and, $\\\\mathbf{\\\\dot{\\\\rho}}\\\\mathbf{RGB{+}I R}^{\\\\prime}$ . However, SR only focuses on the combinations of single modality, RGB $(1.19\\\\%)$ , IR $(1.46\\\\%)$ , and Depth $(0.65\\\\%)$ , where ‘Depth’ is exactly a strong modality. In contrast, Fig. 5 (b) shows that the prediction discrepancy between $\\\\mathbf{\\\\dot{\\\\rho}}\\\\mathbf{RGB+IR}^{\\\\prime}$ ’ and ‘RGB+Depth+IR’ is the largest. And the performance gain of MAR mainly comes from RGB $(1.79\\\\%)$ , IR $(1.63\\\\%)$ , as well as the combination of RGB and IR $(1.02\\\\%)$ . These results show that MAR can mine the weak modality combinations more accurately and force the deployment network to improve its discrimination ability for them.\\n\\n# 5.2. The effectiveness of MAR\\nTo study the effect of MAR, we conduct experiments to compare the performance of the SF-MAD, namely the SFMD with the MAD, and its variant with SR and MAR. Here SR is the conventional modality regularization strategy considering only the single modality combination. 
As shown in Table 6 , SR and MAR improve the performance of SFMAD by $0.24\\\\%$ and $0.63\\\\%$ in average, respectively, showing the effeteness to regularize the single and weak modality combinations. Moreover, MAR outperforms SR by $0.39\\\\%$ in average, demonstrating the superiority of MAR.  \\n\\nHere the average gain of SR and MAR is less than SP and MAD since they aim to improve the performance of only the weak, not all combinations. Specifically, as shown in Table 6 , the three worst-performing combinations are\\n\\n# 6. Conclusion\\nThis paper presents an MMANet framework to aid the deployment network for incomplete multimodal learning. Specifically, MMANet introduces a teacher network pretrained with complete multimodal data to transfer the comprehensive multimodal information to the deployment network via MAD. This helps it acquire modality-invariant and specific information while maintaining robustness for incomplete modality input. Besides, MMANet introduces a regularization network to mine and regularize weak modality combinations via MAR. This forces the deployment network to improve its discrimination ability for them effectively and adaptively. Finally, extensive experiments demonstrate the effectiveness of the proposed MMANet, MAD, and MAR for incomplete multimodal learning.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023}]]"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Look up the stored paper whose title matches `title` (top_k=1 → best single match).\n",
     "# NOTE(review): `query` and `title` are defined in earlier cells not shown here — presumably\n",
     "# a vector-store/DB client and a paper-title string; confirm against the setup cells.\n",
     "# Top-level `await` inside this notebook works because nest_asyncio.apply() ran in cell 1.\n",
     "await query.query_by_title_like(title,top_k=1)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Collect every maximal run of <sup>…</sup><ss>…</ss> pairs (citation marker + cited\n",
     "# sentence/snippet) from the generated survey text. Non-greedy .*? keeps each tag pair minimal.\n",
     "# NOTE(review): `statments_reference` is a typo for `statements_reference`; kept as-is because\n",
     "# downstream cells outside this view may reference the misspelled name — rename globally if not.\n",
     "statments_reference = re.findall(r\"(?:<sup>.*?</sup><ss>.*?</ss>)+\", final_survey)\n"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "references = re.findall(r\"\\[(\\d+)\\] (.*)\", final_survey)\n",
    "reference_dict = {ref[0]: ref[1] for ref in references}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[[{'id': 454847560048311778,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 0,\n",
       "   'chunk_text': '# MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning\\nShicai Wei Chunbo Luo Yang Luo School of Information and Communication Engineering University of Electronic Science and Technology of China  \\n\\n {c.luo, luoyang }@uestc.edu.cn\\n\\n# Abstract\\nMultimodal learning has shown great potentials in numerous scenes and attracts increasing interest recently. However, it often encounters the problem of missing modality data and thus suffers severe performance degradation in practice. To this end, we propose a general framework called MMANet to assist incomplete multimodal learning. It consists of three components: the deployment network used for inference, the teacher network transferring comprehensive multimodal information to the deployment network, and the regularization network guiding the deployment network to balance weak modality combinations. Specifically, we propose a novel margin-aware distillation (MAD) to assist the information transfer by weighing the sample contribution with the classification uncertainty. This encourages the deployment network to focus on the samples near decision boundaries and acquire the refined inter-class margin. Besides, we design a modalityaware regularization (MAR) algorithm to mine the weak modality combinations and guide the regularization network to calculate prediction loss for them. This forces the deployment network to improve its representation ability for the weak modality combinations adaptively. Finally, extensive experiments on multimodal classification and segmentation tasks demonstrate that our MMANet outperforms the state-of-the-art significantly. Code is available at: https://github.com/shicaiwei123/MMANet',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560099692004,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 1,\n",
       "   'chunk_text': '# 1. Introduction\\nMultimodal learning has achieved great success on many vision tasks such as classification [ 21 ,33 ,46 ], object detection [ 26 ,45 ,53 ], and segmentation [ 5 ,23 ,41 ]. However, most successful methods assume that the models are trained and tested with the same modality data. In fact, limited by device [ 32 ,39 ], user privacy [ 13 ,25 ], and working condition [ 3 ,29 ], it is often very costly or even infeasible to collect complete modality data during the inference stage. There is thus substantial interest in assisting the incomplete or even single modality inference via the complete modality data during training.  \\n\\n<html><body><table><tr><td>Modality</td><td>Customized</td><td>Unified</td><td>Drop</td></tr><tr><td>RGB</td><td>10.01</td><td>11.75</td><td>-1.65</td></tr><tr><td>Depth</td><td>4.45</td><td>5.87</td><td>-1.42</td></tr><tr><td>IR</td><td>11.65</td><td>16.62</td><td>-4.97</td></tr><tr><td>RGB+Depth</td><td>3.41</td><td>4.61</td><td>-1.2</td></tr><tr><td>RGB+IR</td><td>6.32</td><td>6.68</td><td>-0.36</td></tr><tr><td>Depth+IR</td><td>3.54</td><td>4.95</td><td>-1.41</td></tr><tr><td>RGB+Depth+IR</td><td>1.23</td><td>2.21</td><td>-0.98</td></tr></table></body></html>  \\n\\nA typical solution is to reconstruct the sample or feature of the missing modalities from the available ones [ 10 ,14 ,15 ,20 ,29 ,32 ]. Nevertheless, this needs to build a specific model for each modality from all possible modality combinations and thus has high complexity. Recent studies focus on learning a unified model, instead of a bunch of networks, for different modality combinations. Generally, many such approaches [ 6 ,11 ,12 ,17 ,51 ,52 ] attempt to leverage feature fusion strategies to capture modality-invariant representation so that the model can adapt to all possible modality combinations. For example, RFNet [ 11 ] designs the regionaware fusion module to fuse the features of available image modalities.  
\\n\\nAlthough the existing unified models are indeed able to increase the efficiency of training and deployment of the multimodal models, their performance is likely to be suboptimal. As shown in Table 1 , the customized models consistently outperform the unified model for different modality combinations. This is because existing unified models usually focus on the modality-invariant features while ignoring the modality-specific information. Note that the complementary modality-specific information of multiple modalities can help refine the inter-class discrimination and improve inference performance [ 2 ,18 ,36 ]. This motivates us to propose the first research question of this paper: Can a unified model consider the modality invariant and specific information simultaneously while maintaining robustness for incomplete modality input?  \\n\\nTo this end, we propose to guide the unified model to learn the comprehensive multimodal information from the teacher model trained with complete modality. This regularizes the target task loss to encourage the unified model to acquire complementary information among different modality combinations multimodal information while preserving the generalization to them. Specifically, we propose a novel margin-aware distillation (MAD) that trains the unified model by guiding it to mimic the inter-sample relation of the teacher model. MAD introduces the classification uncertainty of samples to re-weigh their contribution to the final loss. Since the samples near the class boundary are more likely to be misclassified and have higher classification uncertainty [ 8 ], this encourages the unified model to preserve the inter-class margin refined by the complementary cues and learn the modality-specific information.  \\n\\nAnother limitation of existing unified approaches is that they struggle to obtain optimal performance for the unbalanced training problem. 
To be specific, conventional multimodal learning models tend to fit the discriminative modality combination and their performance will degrade significantly when facing weak modality combinations. To solve this issue, existing unified approaches introduce the auxiliary discriminator to enhance the discrimination ability of the unimodal combinations [ 6 ,11 ,51 ]. This utilizes a hypothesis that a single modality is weaker than multiple ones. However, as shown in Table 1 , no matter for the customized model or the unified model, the single Depth modality outperforms the RGB, IR, and their combinations. This indicates the combination with multiple weak modalities may be harder to be optimized than a single strong modality. Moreover, as shown in Table 3 , RGB becomes the strong modality while Depth and IR become the weak modalities. This indicates that the modality importance is not fixed but varies with scenarios. These findings motivate us to propose the second research question: How to effectively optimize the weak modality combination in varying scenarios?  \\n\\nTo this end, we design a regularization network and MAR algorithm to assist the training of the unified network. Specifically, the regularization network generates additional predictions for all inputs. Then MAR mines and calculates prediction loss for the sample from the weak combinations. This forces the unified model to improve its representation ability for the weak combination. In detail, MAR mines the weak combination via the memorization effect [ 1 ,16 ,49 ]that DNNs tend to first memorize simple examples before overfitting hard examples. As shown in Fig. 5 (a), the unified model tends to fit the samples containing Depth modality firstly at the early stage. Therefore, MAR first mines the strong modality via the memorization effect. Then it determines the combinations of rest modalities as the weak ones.  
\\n\\nFinally, we develop a model and task agnostic framework called MMANet to assist incomplete multimodal learning by combining the proposed MAD and MAR strategies. MMANet can guide the unified model to acquire comprehensive multimodal information and balance the performance of the strong and weak modality combination simultaneously. Extensive comparison and ablation experiments on multimodal classification and segmentation tasks demonstrate the effectiveness of the MMANet.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560159460838,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 2,\n",
       "   'chunk_text': '# 2. Related work\\n\\n# 2.1. Multimodal Learning for Missing Modalities\\nMost existing multimodal learning methods assume that all instances consist of full modalities. However, this assumption does not always hold in real-world applications due to the device [ 32 ,39 ], user privacy [ 13 ,25 ], and working condition [ 3 ,29 ]. Recently, many incomplete multimodal learning methods have been proposed and can be roughly categorized into two types: customized methods and unified methods. Customized methods aim to train a specific model to recover the missing modality in each incomplete modality combination. According to the recovering target, the customized methods can be further divided into sample-based methods and representation-based methods. Sample-based methods focus on imputing the missing modality at the input space with generative adversarial networks [ 4 ,27 ,32 ,37 ]. Due to the complexity of sample reconstruction, it is usually unstable and may introduce noise to harm the primary task at hand [ 34 ]. Thus the representationbased methods are proposed to reconstruct the sample representation via the knowledge distillation [ 3 ,14 ,20 ,29 ] or matrix completion [ 30 ,35 ]. Although promising results are obtained, these methods have to train and deploy a specific model for each subset of missing modalities, which has high complexity in practical applications.  \\n\\nThe unified methods aim to train one model to deal with different incomplete modality combinations by extracting the modality-invariant features. For example, HeMIS [ 17 ]learns an embedding of multimodal information by computing statistics (i.e., mean and variance) from any number of available modalities. Furthermore, Chen et al . introduce the feature disentanglement to cancel out the modalityspecific information. 
Besides, more recent methods, such as LCR [ 52 ] and RFNet [ 11 ] focus on extracting the modalityinvariant representation via different attention mechanisms. Moreover, mmFormer [ 51 ] introduces the transformer block to model the global semantic information for the modalityinvariant embedding. While these methods achieve promising results, they only consider the modality-invariant information while ignoring the modality-specific information. As a result, they usually perform much worse than the customized methods, especially when more than one modality is missing [ 48 ].\\n\\n# 2.2. Knowledge Distillation\\nKnowledge distillation aims to transfer knowledge from a strong teacher to a weaker student network to facilitate supervised learning. Generally, the distillation method can be divided into three types: response-based distillation that matches the softened logits of teachers and students [ 19 ], the representation-based distillation that matches the feature maps [ 24 ,28 ,40 ], and the relation-based distillation that matches the sample relations. [ 38 ,47 ].  \\n\\nWhile originating from the resource-efficient deep learning, knowledge distillation has found wider applications in such areas as incomplete multimodal learning. Here, it is used to transfer the privileged modality information that can only be accessed during the training stage from the teacher to the student [ 3 ,29 ]. Since the input of the teacher and student network is different in incomplete multimodal learning, transferring knowledge by representation-based methods may lead to overfitting [ 15 ]. Recent methods focus on transferring the privileged modality information by the relation-based methods [ 7 ,22 ,48 ]. However, these prior arts usually consider different instances equally and ignore their specificity, which would lead to sub-optimal performance.\\n\\n# 3. Method\\n\\n# 3.1. 
MMANet\\nIn this section, we introduce a general framework called MMANet to address the challenge of incomplete multimodal learning. As shown in Fig. 1 , it consists of three parts: deployment network, teacher network, and regularization network. Specifically, the deployment network is the inference network. To make it robust to the modality incompleteness, MMANet introduces the Bernoulli indicator $\\\\Delta\\\\;=\\\\;\\\\{\\\\delta_{1}...\\\\delta_{m}\\\\}$ after modality encoders and conducts modality dropout during the training stage by randomly setting some components of $\\\\Delta$ as 0. For missing modalities, the corresponding encoded feature maps will be replaced by a zero matrix. Besides, MMANet introduces the teacher network that is pre-trained with complete multimodal data to transfer the comprehensive multimodal knowledge to the deployment network via the MAD. This helps the deployment network acquire the modality-invariant and specific features simultaneously. Finally, MMANet guides the deployment network to train together with the regularization network that produces additional predictions for the weak modality combination via the MAR. This alleviates the overfitting for strong modality combinations. The total loss to guide the training of the deployment network is defined as follows,  \\n\\n$$\\nL_{t o t a l}=L_{T L}+\\\\alpha L_{M A D}+\\\\beta L_{M A R}\\n$$  \\n\\nwhere $\\\\alpha$ and $\\\\beta$ are the hyper-parameters. $L_{T L}$ is the task learning loss, which is determined by the primary task at hand. For example, $L_{T L}$ may be the cross entropy loss when the primary task is classification. $L_{M A D}$ and $L_{M A R}$ are the loss of MAD and MAR respectively.  \\n\\nBesides, the other notations used in MMANet are defined as follows. Given a mini-batch multimodal input $\\\\boldsymbol{x}=\\\\{x_{1},...,x_{m}\\\\}$ , $x_{m}\\\\in R^{b}$ denotes the data of the $m_{t h}$ modality. $b$ is the batch size. 
$E_{m}^{t}$ and $E_{m}^{d}$ denote the encoders for the $m_{t h}$ modality in the teacher and deployment networks, respectively. $F^{t}$ and $F^{d}$ denote the fusion module used in the teacher and deployment networks, respectively. $z^{t}\\\\in R^{b^{t}\\\\times c^{t}\\\\times h^{t}\\\\times w^{t}}$ and $z^{d}\\\\in R^{b^{d}\\\\times c^{d}\\\\times h^{d}\\\\times w^{d}}$ denote the fused features of the teacher and deployment networks, respectively. $\\\\Delta^{d}\\\\in R^{b\\\\times m}$ denotes the indicator vector of $\\\\Delta$ for the mini-batch. Here, $b$ is the batch size, $c$ is the number of output channels, and $h$ and $w$ are spatial dimensions. $P^{t},\\\\,P^{r}$ , and $P^{d}$ denote the task predictor of the teacher, regularization, and deployment networks, respectively. $\\\\boldsymbol{y}^{t},\\\\boldsymbol{y}^{r}$ , and $y^{d}$ denote the $R^{b\\\\times k}$ prediction matrix of the teacher, regularization, and deployment networks, respectively. Here, $k$ is the class number.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560218705384,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 3,\n",
       "   'chunk_text': '# 3.2. MAD\\nThis section introduces the proposed MAD strategy for transferring the comprehensive multimodal information from the teacher network to the deployment network. As shown in Fig. 1 , MAD is conducted between the $z^{t}$ and $z^{d}$ . $z^{d}$ of a sample is varying due to the random modality dropout. In contrast, the sample semantic is invariant. Thus, MAD proposes to transfer the teacher’s knowledge via relation consistency instead of feature consistency. This helps avoid overfitting and harming the representation ability of deployment networks. Moreover, MAD proposes to measure the class boundaries and guide the unified model to pay more attention to the samples near them. This can encourage the deployment network to inherit the refined inter-class margin from the teacher network. Nevertheless, boundaries are usually difficult to detect due to their irregularity. To solve this issue, MAD introduces the classification uncertainty of each sample to re-weight its contribution for the total loss. Since the samples near the class boundaries are more likely to be misclassified and have higher classification uncertainty, this can realize attention to them.  \\n\\n  \\nFigure 1. Overview of the proposed MMANet. It consists of three parts: the deployment network used for final inference, the teacher network transferring comprehensive multimodal knowledge to the deployment network, and the regularization network guiding the deployment network to balance weak modality combinations.  \\n\\n  \\nFigure 2. The illustration of the proposed MAD.  \\n\\nThe overview of the MAD is shown in Fig. 2 . It takes $z^{t},\\\\;z^{d}$ , and $y^{t}$ as the input and consists of three steps: (a) calculating the relation discrepancy vector $g^{t d}\\\\ \\\\in\\\\ \\\\bar{R}^{b}$ , (b) calculating the classification uncertainty vector $\\\\pi^{t}\\\\ \\\\in\\\\ R^{b}$ , and (c) calculating the total loss $L_{M A D}$ for MAD. 
\\n\\n(a) MAD calculates $g^{t d}$ from $z^{t}$ and $z^{d}$ . Specifically, MAD first flattens $z^{t}$ and $z^{d}$ into $z^{t^{\\\\prime}}\\\\,\\\\in\\\\,R^{b^{t}\\\\times c^{t}\\\\ast h^{t}*w^{t}}$ and $z^{d^{\\\\prime}}\\\\;\\\\in\\\\;R^{b^{d}\\\\times c^{d}*h^{d}*w^{d}}$ , and then calculates the relation matrices $\\\\boldsymbol{r}^{t}\\\\,\\\\in\\\\,R^{b\\\\times b}$ and $\\\\boldsymbol{r}^{d}\\\\,\\\\in\\\\,R^{b\\\\times b}$ with the same relation function $\\\\mathbf{R}(\\\\mathbf{u},\\\\mathbf{v})$ , respectively. And the $r^{t}(i,j)$ that denotes the relation between $i_{t h}$ and $j_{t h}$ sample representations of the teacher network can be expressed as follows,  \\n\\n$$\\nr^{t}(i,j)=R(z^{t^{\\\\prime}}(i,:),z^{t^{\\\\prime}}(j,:))\\n$$  \\n\\nBesides, the $\\\\boldsymbol{r}^{d}(i,j)$ that denotes the relation between $i_{t h}$ and $j_{t h}$ sample representations of the deployment network  \\n\\ncan be expressed as follows,  \\n\\n$$\\nr^{d}(i,j)=R(z^{d^{\\\\prime}}(i,:),z^{d^{\\\\prime}}(j,:))\\n$$  \\n\\nTheoretically, $R(u,v)$ can be any metric for measuring the vector distance, such as the Euclidean distance and the cosine distance. Because the dimension of the feature vectors of the teacher and the deployment networks could be very high, to eliminate the curse of dimensionality, we choose cosine distance as the $R(u,v)$ ,  \\n\\n$$\\nR(u,v)=\\\\frac{u^{T}v}{\\\\|u\\\\|_{2}\\\\|v\\\\|_{2}}\\n$$  \\n\\nFurthermore, MAD calculates the discrepancy matrix between $r^{t}$ and $r^{d}$ and sums each row to get $g^{t d}$ .  \\n\\n$$\\ng^{t d}=\\\\sum_{i=1}^{b}(r^{t}-r^{d})_{i}\\n$$  \\n\\nHere, $g^{t d}(i)$ denotes the relation gap between the teacher and deployment networks from the $i_{t h}$ sample to other samples in the same mini-batch.  \\n\\n(b) MAD calculates $\\\\pi^{t}$ from $y^{t}$ . In detail, it takes the information entropy of the logit output of each sample as its classification uncertainty. 
And the classification uncertainty for $i_{t h}$ sample, $\\\\pi^{t}(i)$ , can be expressed as follows,  \\n\\n$$\\n\\\\left\\\\{\\\\begin{array}{l}{{\\\\pi^{t}(i)=H(y^{t}(i,:))}}\\\\\\\\ {{H(x)=-\\\\sigma(x)*l o g(\\\\sigma(x))}}\\\\end{array}\\\\right.\\n$$  \\n\\nwhere $\\\\sigma(.)$ is the softmax function for normalization. $H(x)$ is the information entropy of $x$ . A sample that has a higher classification uncertainty is usually closer to the decision boundaries, since it is more likely to be misclassified. Thus, $\\\\pi^{t}(i),i~\\\\in~[1,b]$ can also denote the margin from the $i_{t h}$ sample representation to the decision boundary.  \\n\\n(c) Finally, MAD takes $\\\\pi^{t}(i)$ as the weight for the corresponding component $g^{t d}(i)$ to calculate $L_{M A D}$ ,  \\n\\n$$\\nL_{M A D}=\\\\sum_{i=1}^{b}\\\\sigma(\\\\pi^{t})(i)*g^{t d}(i)\\n$$  \\n\\nThis encourages the deployment network to focus on the samples near the decision boundaries and preserve the interclass margin refined by the comprehensive multimodal information from the teacher network.\\n\\n# 3.3. MAR\\nThis section introduces the MAR algorithm that forces the deployment network to improve its discriminating ability for weak modality combinations adaptively. As shown in Fig. 1 , MAR takes the $y^{r}$ ,$y^{d}$ and $\\\\Delta^{d}$ as the input to calculate the $L_{M A R}$ . Specifically, MAR first proposes a contrastive ranking strategy to mine the weak modality combinations. Compared to simply taking the combination with a single modality as the weak one, this further considers the combination with multiple modalities and can get more accurate mining results. Then, MAR calculates the prediction loss for the weak modality combinations, guiding the deployment network to pay more attention to them.  \\n\\nThe overview of MAR is shown in Fig. 3 . It consists of two steps: (a) when $E\\\\,\\\\leq\\\\,N$ , mining the weak modality combination set $\\\\Omega$ , and (b) when $E\\\\,>\\\\,N$ , calculating $L_{M A R}$ . 
Here $E$ is the current training epoch, and $N$ is the number of warm-up epochs.  \\n\\n(a) MAR calculates $\\\\Omega$ from $y^{d}$ using contrastive ranking. Specifically, MAR proposes to calculate the predicted output $Y^{O}\\\\in$ $R^{(m+1)\\\\times n\\\\times k}$ of $\\\\Delta_{i}$ ,$i\\\\,\\\\in\\\\,[0,m]$ , on the train set after each training epoch.  \\n\\n$$\\nY^{O}(i,:,:)=y^{d}(\\\\Delta_{i})\\n$$  \\n\\n$n$ is sample number. $\\\\Delta_{i}$ means the $i_{t h}$ component of $\\\\Delta$ is 0. $\\\\Delta_{0}$ means no component of $\\\\Delta$ is 0, which must contain the strong modality. Since the deployment network tends to first memorize the strong modality, $\\\\Delta_{w},w\\\\in$ $[1,m]$ that makes $Y^{O}(w,:,:)$ has the biggest distance with $Y^{O}(0,:,:)$ is the hard combination that does not contain the strong modality. And the element of $\\\\Omega$ can be determined as $\\\\Delta_{w}$ and the $\\\\Delta$ consists of the modalities in it.  \\n\\nSpecifically, to make $\\\\Delta_{w}$ robust for the randomness of neural network learning, MAR introduces two innovations. Firstly, MAR calculates the prediction discrepancy from the prediction distribution $Y^{d}\\\\in R^{(m+1)\\\\times k}$ instead of $Y^{O}$ ,  \\n\\n$$\\n\\\\left\\\\{\\\\begin{array}{l l}{Y^{d}(i,j)=\\\\sum(Y^{D}(i,:)==j)}\\\\\\\\ {Y^{D}=\\\\arg\\\\operatorname*{max}(Y^{O},d i m=2)}\\\\end{array}\\\\right.\\n$$  \\n\\n  \\nFigure 3. The illustration of the proposed MAR.  \\n\\nwhere $j\\\\in[0,k-1]$ . Compared with $Y^{O}$ ,$Y^{d}$ needs only class-wise but not sample-wise consistency. Then the discrepancy vector $g^{d}\\\\in R^{m}$ is defined as follows,  \\n\\n$$\\ng^{d}(i)=K L(l o g(\\\\sigma(Y^{d}(i))),\\\\sigma(Y^{d}(0)))\\n$$  \\n\\nwhere $\\\\mathbf{KL}(,)$ means the KL divergence, $i\\\\in[1,m]$ .  
\\n\\nSecondly, MAR introduces a memory bank $M^{d}\\\\;\\\\in\\\\;R^{N\\\\times m}$ to save the $g^{d}$ among the warm-up epochs and performs average filtering to obtain ${\\\\overline{{g}}}^{d}$ ,  \\n\\n$$\\n\\\\overline{{\\\\boldsymbol{g}}}^{d}=\\\\sum_{i=1}^{N}\\\\frac{1}{N}(\\\\boldsymbol{M}^{d})_{i}\\n$$  \\n\\nwhere $(M^{d})_{i}$ is the $g^{d}$ in the $i_{t h}$ epoch. And $\\\\Delta_{w}$ can be determined as $\\\\Delta_{i}$ where $i=a r g m a x(\\\\overline{{g}}^{d})$ .  \\n\\n(b) MAR calculates $L_{M A R}$ from $y^{r}$ ,$\\\\Delta^{d}$ and $\\\\Omega$ . In detail, MAR first calculates the weak combination mask $M^{r}\\\\in R^{b}$ from $\\\\Delta^{d}$ and $\\\\Omega$ ,  \\n\\n$$\\n\\\\left\\\\{\\\\begin{array}{l l}{M^{r}(i)=F A L S E\\\\quad i f\\\\Delta^{d}(i)\\\\notin\\\\Omega}\\\\\\\\ {M^{r}(i)=T R U E\\\\quad i f\\\\Delta^{d}(i)\\\\in\\\\Omega}\\\\end{array}\\\\right.\\n$$  \\n\\nwhere $i\\\\in[0,b-1]$ ,$\\\\Delta^{d}(i)$ is the $\\\\Delta$ for the $i_{t h}$ sample in this mini-batch. Then, the $L_{M A R}$ is defined as follows,  \\n\\n$$\\nL_{M A R}=L_{T L}(y^{r}[M^{r}],l[M^{r}])\\n$$  \\n\\nwhere $l$ is the groundtruth vector for $y^{r}.\\\\quad[.]$ denotes the index operator.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560270347754,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 4,\n",
       "   'chunk_text': '# 4. Experiments\\nWe conduct experiments on multimodal classification and segmentation tasks to evaluate the proposed MMANet. In the following, we first compare the MMANet architecture with the state-of-the-art on these two tasks. Then, we ablate the MAD and MAR strategies of MMANet.\\n\\n# 4.1. Performance and Comparison on Multimodal Classification\\nDatasets : We take the face anti-spoofing task as the example of the multimodal classification and conduct experiments on the CASIA-SURF [ 50 ] and CeFA [ 31 ] datasets. Both of them consist of the RGB, Depth, and IR modalities. For CASIA-SURF, we follow the intra-testing protocol suggested by the authors and divide it into train, validation, and test sets with $29\\\\mathbf{k}$ , 1k, and 57k samples, respectively. For CeFA, we follow the cross-ethnicity and cross-attack protocol suggested by the authors and divide it into train, validation, and test sets with $35\\\\mathrm{k}$ , 18k, and $54\\\\mathrm{k}$ samples, respectively. Here we report the results on the test set using the metric of Average Classification Error Rate (ACER) [ 50 ].  \\n\\nTable 2. Performance on the multimodal classification task with CASIA-SURF. $\\\\downarrow$ means that the lower the value, the better the performance.   
\\n\\n\\n<html><body><table><tr><td colspan=\"3\">Modalities</td><td colspan=\"7\">ACER(↓)</td></tr><tr><td rowspan=\"2\">RGB</td><td rowspan=\"2\">Depth</td><td rowspan=\"2\">IR</td><td>Customized</td><td colspan=\"5\">Unified</td><td></td></tr><tr><td>SF</td><td>SF-MD</td><td>HeMIS</td><td>LCR</td><td>RFNet</td><td>MMFormer</td><td>MMANet</td></tr><tr><td></td><td>O</td><td>O</td><td>10.01</td><td>11.75</td><td>14.36</td><td>13.44</td><td>12.43</td><td>11.15</td><td>8.57</td></tr><tr><td>O</td><td></td><td>O</td><td>4.45</td><td>5.87</td><td>4.70</td><td>4.40</td><td>4.17</td><td>4.67</td><td>2.27</td></tr><tr><td>O</td><td>O</td><td></td><td>11.65</td><td>16.62</td><td>16.21</td><td>15.26</td><td>14.69</td><td>13.99</td><td>10.04</td></tr><tr><td></td><td></td><td>O</td><td>3.41</td><td>4.61</td><td>3.23</td><td>3.32</td><td>2.23</td><td>1.93</td><td>1.61</td></tr><tr><td></td><td>O</td><td></td><td>6.32</td><td>6.68</td><td>6.27</td><td>5.16</td><td>4.27</td><td>4.77</td><td>3.01</td></tr><tr><td>O</td><td></td><td></td><td>3.54</td><td>4.95</td><td>3.68</td><td>3.53</td><td>3.22</td><td>3.10</td><td>1.18</td></tr><tr><td></td><td></td><td></td><td>1.23</td><td>2.21</td><td>1.97</td><td>1.88</td><td>1.18</td><td>1.94</td><td>0.87</td></tr><tr><td colspan=\"2\">Average</td><td></td><td>5.80</td><td>7.52</td><td>7.18</td><td>6.71</td><td>6.02</td><td>5.93</td><td>3.94</td></tr></table></body></html>  \\n\\nTable 3. Performance on the multimodal classification task with the CeFA dataset.   
\\n\\n\\n<html><body><table><tr><td colspan=\"2\">Modalities</td><td colspan=\"3\">ACER(↓)</td></tr><tr><td rowspan=\"3\">RGB Depth</td><td rowspan=\"3\">IR</td><td>Customized</td><td colspan=\"2\">Unified</td></tr><tr><td>SF</td><td>MMFormer</td><td>MMANet</td></tr><tr><td></td><td>28.51</td><td></td></tr><tr><td></td><td>O</td><td>27.44</td><td></td><td>27.15</td></tr><tr><td>O</td><td></td><td>33.75</td><td>33.58</td><td>32.50</td></tr><tr><td>O</td><td>O</td><td>36.17</td><td>39.56</td><td>35.62</td></tr><tr><td></td><td>O</td><td>35.62</td><td>29.47</td><td>22.87</td></tr><tr><td></td><td>O</td><td>31.62</td><td>27.66</td><td>23.27</td></tr><tr><td>O</td><td></td><td>36.62</td><td>32.17</td><td>30.45</td></tr><tr><td></td><td></td><td>24.15</td><td>30.72</td><td>23.68</td></tr><tr><td colspan=\"2\">Average</td><td>32.20</td><td>31.52</td><td>27.94</td></tr></table></body></html>  \\n\\nImplementation : We use random flipping, rotation, and cropping for data augmentation. All models are optimized by an SGD for 100 epochs with a mini-batch 64. The learning rate is initialized to 0.001 with 5 epochs of linear warmup and divided by 10 at 16, 33, and 50 epochs. Weight decay and momentum are set to 0.0005 and 0.9, respectively.  \\n\\nThe hyper-parameters of comparison methods use the suggested ones in the original articles. The $(\\\\alpha,\\\\beta)$ for MMANet is set as (30 ,0 .5) and (30 ,0 .5) for CASIA-SURF and CeFA, respectively. The warm-up epoch $N$ is set as 5.  \\n\\nComparison : Here we compare MMANet with two different unified methods for incomplete multimodal learning. One is an early method that only focuses on extracting modality-invariant features, such as HeMIS [ 17 ] and LCR [ 52 ]. Another is the enhanced method that further considers improving the discrimination ability for single-modal combinations, such as RFNet [ 11 ], and mmFormer [ 51 ].  \\n\\nBesides, we introduce two baseline methods, SF and SF-MD. 
SF [ 50 ] is the benchmark method of the CASIA-SURF, which is a customized method that trains the model for each modality combination. SF-MD is the variant of SF by simply adding the Bernoulli indicator after its modality encoder. This enables SF-MD to become a unified model that trains a single model for all modality combinations.  \\n\\nFinally, for a fair comparison, we follow the basic implementation of SF for all the comparison methods. Specifically, we unify the modality encoders of HeMIS, LCR, RFNet, and mmFormer as the ResNet18 used in SF. Besides, we set the SF model trained with complete multimodal data as the teacher network and the SF-MD model as the deployment network.  \\n\\nResults :Table 2 and Table 3 show the comparison results with the state-of-the-art methods on the CASIA-SURF and CeFA datasets, respectively. Compared with the second-best unified method, i.e. mmFormer, MMANet decreases the average ACER by $1.99\\\\%$ and $3.58\\\\%$ on the CASIA-SURF and CeFA, respectively. Besides, we can see that MMANet achieves the best performance on both datasets for all the seven modality combinations for CASIA-SURF. This shows the superiority of our method on the incomplete multimodal classification task. More importantly, MMANet even outperforms the customized baseline method, i.e. SF, for all the modality combinations on the CASIA-SURF and CeFA, decreasing the average ACER by $1.86\\\\%$ and $4.26\\\\%$ . This demonstrates the effectiveness of the proposed MAD and MAR for the incomplete multimodal classification task.  \\n\\nTable 4. Performance on the multimodal segmentation task with NYUv2. $\\\\uparrow$ means that the higher the value, the better the performance.   
\\n\\n\\n<html><body><table><tr><td colspan=\"2\">Modality</td><td colspan=\"7\">mIOU(↑)</td></tr><tr><td rowspan=\"2\">RGB</td><td rowspan=\"2\">Depth</td><td colspan=\"6\">Unified</td></tr><tr><td>Customized ESANet</td><td>ESANet-MD</td><td>HeMIS</td><td>LCR</td><td>RFNet</td><td>mmFormer</td><td>MMANet</td></tr><tr><td></td><td></td><td>44.22</td><td>41.34</td><td>33.23</td><td>41.91</td><td>42.89</td><td>43.22</td><td>44.93</td></tr><tr><td></td><td></td><td>40.55</td><td>39.76</td><td>31.23</td><td>39.88</td><td>40.76</td><td>41.12</td><td>42.75</td></tr><tr><td></td><td></td><td>49.18</td><td>47.23</td><td>37.77</td><td>47.46</td><td>48.13</td><td>48.45</td><td>49.62</td></tr><tr><td colspan=\"2\">Average</td><td>44.65</td><td>42.77</td><td>34.07</td><td>43.08</td><td>43.92</td><td>44.26</td><td>45.58</td></tr></table></body></html>  \\n\\nTable 5. Performance on the multimodal segmentation task with the Cityscapes dataset.   \\n\\n\\n<html><body><table><tr><td colspan=\"2\">Modality</td><td colspan=\"2\">mIOU(↑)</td></tr><tr><td rowspan=\"2\">RGB Depth</td><td>Customized</td><td colspan=\"2\">Unified</td></tr><tr><td>ESANet</td><td>mmFormer</td><td>MMANet</td></tr><tr><td></td><td>77.60</td><td>76.62</td><td>77.61</td></tr><tr><td></td><td>59.11</td><td>58.53</td><td>60.12</td></tr><tr><td></td><td>78.62 71.77</td><td>78.01</td><td>78.89</td></tr><tr><td colspan=\"2\">Average</td><td>71.05</td><td>72.20</td></tr></table></body></html>\\n\\n# 4.2. Performance and Comparison on Multimodal Segmentation\\nDatasets : We take the semantic segmentation task as the example of multimodal segmentation and conduct experiments on the NYUv2 [ 43 ] and Cityscapes [ 9 ] datasets. Both of them consist of the RGB and Depth modalities. Specifically, NYUv2 contains 1,449 indoor RGB-D images, of which 795 are used for training and 654 for testing. We used the common 40-class label setting. Cityscapes is a large-scale outdoor RGB-D dataset for urban scene understanding. 
It contains 5,000 finely annotated samples with a resolution of $2048\\\\!\\\\times\\\\!1024$ , of which 2,975 for training, 500 for validation, and 1,525 for testing. Cityscapes also provides 20k coarsely annotated images, which we did not use for training. For both datasets, we report the results on the validation set using the metric of mean IOU (mIOU).  \\n\\nImplementation : We use random scaling, cropping, color jittering, and flipping for data augmentation. All models are optimized by Adam for 300 epochs with a mini-batch 8. The learning rate is initialized with 1e-2 and adapted by the PyTorch’s one-cycle scheduler [ 44 ].  \\n\\nThe hyper-parameters of the comparison methods use the suggested ones in their article. The hyper-parameter $(\\\\alpha,\\\\beta)$ for MMANet is set as $(4,0.2)$ and (10 ,0 .1) for the NYUv2 and Cityscapes datasets, respectively. The warmup epoch $N$ is set as 10.  \\n\\nComparison :We also compare MMANet with the HeMIS [ 17 ], LCR [ 52 ], RFNet [ 11 ], and mmFormer [ 51 ]. Here, we set ESANet and ESANet-MD as the baseline.  \\n\\nESANet [ 42 ] is an efficient and robust model for RGBD segmentation, which trains the model for each modality combination. ESANet-MD is the variant of ESANet by simply adding the Bernoulli indicator after its modality encoder. ESANet-MD trains only a single model for all modality combinations. Finally, for a fair comparison, we unify the modality encoder of HeMIS, LCR, RFNet, and mmFormer as the ResNet50 with NBt1 used in ESANet. Besides, we set the ESANet model trained with complete multimodal data as the teacher network and the ESANet-MD model as the deployment network.  \\n\\nResults : Table 4 and Table 5 list the comparison results on the NYUv2 and Cityscapes datasets, respectively. From these results, we can see that MMANet achieves the best performance on both datasets for all the modality combinations. 
In particular, it outperforms the second-best method, mmFormer, by $1.32\\\\%$ and $1.05\\\\%$ in the NYUv2 and Cityscapes datasets, respectively. This demonstrates the effectiveness of the MMANet on the multimodal segmentation task. Moreover, MMANet improves the average performance of ESANet-MD by $2.81\\\\%$ in the NYUv2 dataset and even outperforms the customized baseline, ESANet, by $0.97\\\\%$ and $0.43\\\\%$ in NYUv2 and Cityscapes datasets. This shows the effectiveness of the proposed MAD and MAR on the incomplete multimodal segmentation task.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847560329330156,\n",
       "   'paper_id': '643e0ad00746dc40e3419426',\n",
       "   'paper_title': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning',\n",
       "   'chunk_id': 5,\n",
       "   'chunk_text': '# 5. Ablation Study\\nThis section will study the effectiveness of MAD and MAR and conduct extensive ablation experiments on four datasets. Limited by page, we only present the results of the CASIA-SURF dataset and other results can be seen in the supplementary material.\\n\\n# 5.1. The effectiveness of MAD\\nTo study the effect of MAD, we conduct experiments to compare the performance of the vanilla SF-MD and its variant with SP and MAD. Here, SP is the degradation method of MAD that transfers knowledge by directly matching the cosine distance of the sample representations between teacher and deployment networks. The results are shown in Table 6 . We can see that the variant of SF-MD consistently outperforms the vanilla SF-MD in all the modality combination and improve its performance by $1.6\\\\%$ and $2.93\\\\%$ in average. This demonstrates the effectiveness of transferring comprehensive multimodal information from the teacher network to the deployment network. Furthermore, the proposed MAD outperforms SP by $1.33\\\\%$ , which demonstrates the validity of re-weighing sample loss via the classification uncertainty. This is because the classification uncertainty re-weighing can encourage the deployment to focus on the hard samples and thus acquire a more separable inter-class margin than the conventional SP (see Fig. 4 ).  \\n\\n  \\nFigure 4. The prediction distribution of the SF-MD model assisted by SP and MAD on CASIA-SURF dataset. X-axis represents the normalized logit output and $\\\\scriptstyle\\\\mathbf{X}=0.5$ is the classification boundary. orange and blue dots denotes two different classes.  \\n\\nTable 7. Ablation result of MAR on the CASIA-SURF dataset.   
\\n\\n\\n<html><body><table><tr><td>RGB</td><td>Depth IR</td><td>SF-MAD +SR</td><td>+MAR</td></tr><tr><td></td><td>O O</td><td>10.36 9.17</td><td>8.57</td></tr><tr><td rowspan=\"3\"></td><td>O</td><td>2.54 1.89</td><td>2.27</td></tr><tr><td>O</td><td>11.67</td><td>10.21 10.04</td></tr><tr><td>O</td><td>1.23</td><td>1.66 1.61</td></tr><tr><td></td><td>O</td><td>4.09</td><td>4.37 3.01</td></tr><tr><td rowspan=\"2\"></td><td></td><td>1.44</td><td>2.12 1.18</td></tr><tr><td></td><td>0.77</td><td>0.92 0.87</td></tr><tr><td colspan=\"2\">Average</td><td>4.57</td><td>4.33 3.94</td></tr></table></body></html>  \\n\\n  \\nFigure 5. (a) The learning process of different modality combinations on the CASIA-SURF dataset during the warm-up stage. (b) Th corresponding $g^{d}\\\\in R^{3}$ for (dashed line) and its average result $\\\\overline{{g}}^{d}$ (solid line) for the warm-up stage.  \\n\\n‘RGB’, ‘IR’ and, $\\\\mathbf{\\\\dot{\\\\rho}}\\\\mathbf{RGB{+}I R}^{\\\\prime}$ . However, SR only focuses on the combinations of single modality, RGB $(1.19\\\\%)$ , IR $(1.46\\\\%)$ , and Depth $(0.65\\\\%)$ , where ‘Depth’ is exactly a strong modality. In contrast, Fig. 5 (b) shows that the prediction discrepancy between $\\\\mathbf{\\\\dot{\\\\rho}}\\\\mathbf{RGB+IR}^{\\\\prime}$ ’ and ‘RGB+Depth+IR’ is the largest. And the performance gain of MAR mainly comes from RGB $(1.79\\\\%)$ , IR $(1.63\\\\%)$ , as well as the combination of RGB and IR $(1.02\\\\%)$ . These results show that MAR can mine the weak modality combinations more accurately and force the deployment network to improve its discrimination ability for them.\\n\\n# 5.2. The effectiveness of MAR\\nTo study the effect of MAR, we conduct experiments to compare the performance of the SF-MAD, namely the SFMD with the MAD, and its variant with SR and MAR. Here SR is the conventional modality regularization strategy considering only the single modality combination. 
As shown in Table 6 , SR and MAR improve the performance of SFMAD by $0.24\\\\%$ and $0.63\\\\%$ in average, respectively, showing the effeteness to regularize the single and weak modality combinations. Moreover, MAR outperforms SR by $0.39\\\\%$ in average, demonstrating the superiority of MAR.  \\n\\nHere the average gain of SR and MAR is less than SP and MAD since they aim to improve the performance of only the weak, not all combinations. Specifically, as shown in Table 6 , the three worst-performing combinations are\\n\\n# 6. Conclusion\\nThis paper presents an MMANet framework to aid the deployment network for incomplete multimodal learning. Specifically, MMANet introduces a teacher network pretrained with complete multimodal data to transfer the comprehensive multimodal information to the deployment network via MAD. This helps it acquire modality-invariant and specific information while maintaining robustness for incomplete modality input. Besides, MMANet introduces a regularization network to mine and regularize weak modality combinations via MAR. This forces the deployment network to improve its discrimination ability for them effectively and adaptively. Finally, extensive experiments demonstrate the effectiveness of the proposed MMANet, MAD, and MAR for incomplete multimodal learning.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023}],\n",
       " [{'id': 454847717995583366,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 0,\n",
       "   'chunk_text': '# Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects\\nElisa Warner 1 $\\\\circledcirc$ ·Joonsang Lee 1 ·William Hsu 2 ·Tanveer Syeda-Mahmood 3 ·Charles E. Kahn Jr. ·Olivier Gevaert 5 ·Arvind Rao 1  \\n\\nReceived: 30 January 2023 / Accepted: 9 February 2024 / Published online: 23 April 2024   \\n$\\\\circledcirc$ The Author(s) 2024\\n\\n# Abstract\\nMachine learning (ML) applications in medical artificial intelligence (AI) systems have shifted from traditional and statistical methods to increasing application of deep learning models. This survey navigates the current landscape of multimodal ML, focusing on its profound impact on medical image analysis and clinical decision support systems. Emphasizing challenges and innovations in addressing multimodal representation, fusion, translation, alignment, and co-learning, the paper explores the transformative potential of multimodal models for clinical predictions. It also highlights the need for principled assessments and practical implementation of such models, bringing attention to the dynamics between decision support systems and healthcare providers and personnel. Despite advancements, challenges such as data biases and the scarcity of “big data” in many biomedical domains persist. We conclude with a discussion on principled innovation and collaborative efforts to further the mission of seamless integration of multimodal ML models into biomedical practice.  \\n\\nKeywords Machine learning $\\\\cdot$ Multimodal $\\\\cdot$ Representation $\\\\cdot$ Fusion $\\\\cdot$ Translation $\\\\cdot$ Alignment $\\\\cdot$ Co-learning $\\\\cdot$ Artificial intelligence $\\\\cdot$ Data integration',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718043293576,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 1,\n",
       "   'chunk_text': '# 1 Introduction\\nMachine learning (ML), the process of leveraging algorithms and optimization to infer strategies for solving learning tasks, has enabled some of the greatest developments in artificial intelligence (AI) in the last decade, enabling the automated segmentation or class identification of images, the ability to answer nearly any text-based question, and the ability to generate images never seen before. In biomedical research, many of these ML models are quickly being applied to medical images and decision support systems in conjunction with a significant shift from traditional and statistical methods to increasing application of deep learning models. At the same time, the importance of both plentiful and well-curated data has become better understood, coinciding as of the time of writing this article with the incredible premise of OpenAI’s ChatGPT and GPT-4 engines as well as other generative AI models which are trained on a vast, well-curated, and diverse array of content from across the internet (OpenAI, 2023 ).  \\n\\nAs more data has become available, a wider selection of datasets containing more than one modality has also enabled growth in the multimodal research sphere. Multimodal data is intrinsic to biomedical research and clinical care. While data belonging to a single modality can be conceptualized as a way in which something is perceived or captured in the world into an abstract digitized representation such as a waveform or image, multimodal data aggregates multiple modalities and thus consists of several intrinsically different representation spaces (and potentially even different data geometries). 
Computed tomography (CT) and positron emission tomography (PET) are specific examples of single imaging modalities, while magnetic resonance imaging (MRI) is an example itself of multimodal data, as its component sequences T1-weighted, T2-weighted, and fluid-attenuatedinversionrecovery(FLAIR)caneachbeconsidered their own unique modalities, since each of the MR sequences measure some different biophysical or biological property. Laboratory blood tests, patient demographics, electrocardiogram (ECG) and genetic expression values are also common modalities in clinical decision models. This work discusses unique ways that differences between modalities have been addressed and mitigated to improve accuracy of AI models in similar ways to which a human would naturally be able to re-calibrate to these differences.  \\n\\nThere is conceptual value to building multimodal models. Outside of the biomedical sphere, many have already witnessed the sheer power of multimodal AI in text-to-image generators such as DALL ·E 2, DALL ·E 3 or Midjourney (Ramesh et al., 2022 ; Betker et al., 2023 ; Oppenlaender, 2022 ), some of whose artful creations have won competitions competing against humans (Metz, 2022 ). In the biomedical sphere, multimodal models provide potentially more robust and generalizable AI predictions as well as a more holistic approach to diagnosis or prognosis of patients, akin to a more human-like approach to medicine. While a plethora of biomedical AI publications based on unimodal data exist, fewer multimodal models exist due to cost and availability constraints of obtaining multimodal data. However, since patient imaging and lab measurements are decreasing in cost and increasing in availability, the case for building multimodal biomedical AI is becoming increasingly compelling.  \\n\\nWith the emergence of readily-available multimodal data comes new challenges and responsibilities for those who use them. The survey and taxonomy from Baltrusaitis et al. 
( 2019 ) presents an organized description of these new challenges, which can be summarized in Fig. 1 : (1) representation, (2) fusion, (3) alignment, (4) translation, (5) co-learning. Representation often condenses a single modality such as audio or an image to a machine-readable data structure such as a vector, matrix, tensor object, or other geometric form, and is concerned with ways to combine more than one modality into the same representation space. Good multimodal representations are constructed in ways in which relationships and context can be preserved between modalities. Multimodal fusion relates to the challenge of how to properly combine multimodal data into a predictive model. In multimodal alignment , models attempt to automatically align one modality to another. In a simple case, models could be constructed to align PPG signals taken at a $60\\\\mathrm{Hz}$ sampling frequency with a $240\\\\mathrm{Hz}$ ECG signal. In a more challenging case, video of colonoscopy could be aligned to an image representing the camera’s location in the colon. Multimodal translation consists of mapping one modality to another. For example, several popular natural language processing (NLP) models attempt to map an image to a description of the image, switchingfromtheimagingdomaintoatext domain. Intranslational medicine, image-to-image translation tends to be the most common method, whereby one easily-obtained imaging domain such as CT is converted to a harder-to-obtain domain such as T1-weighted MRI. Lastly, multimodal co-learning involves the practice of transferring knowledge learned from one modality to a model or data from a different modality.  \\n\\nIn this paper, we use the taxonomical framework from Baltrusaitis et al. ( 2019 ) to survey current methods which address each of the five challenges of multimodal learning with a novel focus on addressing these challenges in medical image-based clinical decision support. 
The aim of this work is to introduce both current and new approaches for addressing each multimodal challenge. We conclude with a discussion on the future of AI in biomedicine and what steps we anticipate could further progress in the field.',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718094149514,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 2,\n",
       "   'chunk_text': '# 2 Multimodal Learning in Medical Applications\\nIn the following section, we reintroduce the five common challenges in multimodal ML addressed in Sect. 1 and discuss modern approaches to each challenge as applied to image-based biomedicine. The taxonomical subcategories of Representation and Fusion are summarized in Fig. 2 , while those for Translation, Alignment and Co-learning are summarized in Fig. 3 . A table of relevant works by the challenge addressed and data types used are given in Table 1 .',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718140286860,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 3,\n",
       "   'chunk_text': '# 2.1 Representation\\nRepresentation in machine learning typically entails the challenge of transferring contextual knowledge of a complex entity such as an image or sound to a mathematicallyinterpretable or machine-readable format such as a vector or a matrix. Prior to the rise of deep learning, features were engineered in images using techniques such as the aforementioned Scale-Invariant Feature Transform (SIFT) or through methods such as edge detection. Features in audio or other waveform signals such as ECG could be extracted utilizing wavelets or Fourier transform to isolate latent properties of signals and then quantitative values could be derived from morphological patterns in the extracted signal. Multimodal representation challenges venture a step further, consisting of the ability to translate similarities and differences from one modality’s representation to another modality’s representation. For example, when representing both medical text and CT images, if the vector representations for “skull” and “brain” in medical text are closer than those for “skull” and “pancreas”, then in a good CT representation, such relationships between vector representations of these structures in the image should remain preserved. The derivation of “good” representations in multimodal settings have been outlined in Bengioetal.( 2013 )andextendedbySrivastavaandSalakhutdinov ( 2014 ).  \\n\\n  \\nFig. 
1 Challenges in multimodal learning: (1) representation, which concerns how multiple modalities will be geometrically represented and how to represent intrinsic relationships between them; (2) fusion, the challenge of combining multiple modalities into a predictive model; (3) translation, involving the mapping of one modality to another; (4) alignment, which attempts to align two separate modalities spatially or temporally; and (5) co-learning, which involves using one modality to assist the learning of another modality  \\n\\nIt is crucial to acknowledge that representation becomes notably challenging when dealing with more abstract concepts. In a unimodal context, consider the task of crafting representations from an image. Beyond pixel intensities, these representations must encapsulate contextual and semantically-proximate information from the image. A simplistic model may fail to encode context adequately, discerning insufficient distinctions between a foreground and background to represent nuanced visual-semantic concepts. Achieving such subtleties in representations, particularly in abstract contexts, poses increased challenges compared to quantifying similarities and differences in less-nuanced data such as cell counts or gene expression.  \\n\\nPrior to delving into multimodal representations, it is instructive to elucidate strategies for crafting proficient unimodal representations, as multimodal approaches often involve combining or adapting multiple unimodal methods. For images, pretrained networks are a common approach for transforming images into good vector representations.  \\n\\n  \\nFig. 2 A graphical representation of the taxonomical sublevels of multo be influenced by each other ( coordinated ). Multimodal fusion can be timodal representation and fusion, and the focus of each challenge. 
distinguished by whether a model is uniquely constructed to fuse the Multimodal representation can be categorized into whether the repremodalities ( model-based ), or whether fusion occurs before or after the sentationsarejoinedintoasinglevector( joint )orseparatelyconstructed model step ( model-agnostic )  \\n\\nAnother approach is use of autoencoders, which condense image representations into lower-dimensional context vectors that can be decoded to reconstruct the original image. Multimodal autoencoders have been applied to MRI modalities in Hamghalam et al. ( 2021 ) and in this example were also utilized to impute representations for missing modalities.  \\n\\nAnother approach for multimodal representation could be through the use of disentanglement networks , which can separate latent properties of an image into separate vectors. In such cases, an image is given as input and the autoencoder is often split in such a way that two vectors are produced as intermediate pathways, where joining the intermediate vectors should result in the original input. Each intermediate pathway is often constrained by a separate loss function term to encourage separation of each pathway into the desired latent characteristics. In this way, one input image can be represented by two separate vectors, each representing a disjointed characteristic of the image. This disentanglement method has been applied in Jiang and Veeraraghavan ( 2020 )to separate context in CT and MRI from their style so that one modality can be converted in to the other. It was also applied for a single modality in Bône et al. ( 2020 ) to separate “shape” and “appearance” representations of an input, which could potentially be applied to different imaging modalities to extract only similar shapes from each.  \\n\\nWhen two or more vectorized modalities are combined into a model, they are typically combined in one of two ways: (1) joint, or (2) coordinated representations. 
A joint representation is characterized by aggregation of the vectors at some pointintheprocess,wherebyvectorrepresentationsfromtwo the purpose of the alignment, whether as the goal ( explicit ) or as an intermediate step towards the goal output ( implicit ). In co-learning , a distinction is made between the use of parallel (paired) multimodal data, or non-parallel (unpaired) multimodal data. In co-learning models, one of the modalities is only used in training but does not appear in testing  \\n\\n  \\nFig. 3 A graphical representation of the taxonomical sublevels of multimodal translation, alignment and co-learning, and the focus of each challenge. In translation , models are distinguished based on whether they require use of a dictionary to save associations between modalities (dictionary-based ), or if the associations are learned in a multimodal network ( generative ). In alignment , distinction is made depending on  \\n\\nLiterature relating to the five challenges of multimodal machine learning by the datatype analyzed Table 1   \\n\\n\\n<html><body><table><tr><td rowspan=\"8\">EHR PET Daza et al. (2020) and Zhou et Hamghalam et al. (2021) and Datatype MRI Challenge</td><td rowspan=\"8\">Daza et al. (2020), Sonsbeek and Worring (2020), Zhang et al. (2023) (2020), Yang et al. (2020), Vivar et al. (2020), Liet al. (2021), Bhalodia et al. (2021), Cui et al. (2022) and and Wang et al. (2023) Khosravi et al. (2022) (2020), Zhou et al. Daza etal.( (2020), Hu et al. (2020) and Shin et al. Neubauer et al. (2020) Daza et al. (2020), Neubauer et al. (2020), Yang et al. (2020), (2021) and Jiang and Veeraraghavan (2023) Bhalodia et al. ( Carbonell et al. ( al. (2023) Azcona et al. (2020), Neubauer Zhang et al. (2022), Rudie et Zhang et al. (2023) and Zhou (2020), Wang et al. (25021), al. (2022), Liu et al. (2023), et al. (2020), Zhou et al. Jiang and Veeraraghavan Zhang et al. (2022) et al. 
(2023) Representation Translation Fusion</td><td rowspan=\"8\">(207) 1e 10 1pue (0707) 1e 1 Su1 (2020) Yang et al. (2020), Leroy et al. (2023), Zhou et al. (2023) and (2020) and Zhu et al. (2020) Xue et al. (2020), Hu et al. Li et al. (2023) Wang et al. (25021) and Zhang (2020), Hu et al. (2020), Guo et al. (2020), Shin et al. pe 1exe1 pue (0707) Nishimoto (2023) et al. (2022) Alignment</td><td rowspan=\"8\">Xing et al. (2022) Jafari et al. (2022) and Dong (2021), Pei et al. (2023), et al. (2022) Varsavsky et al. (2020), Yang et Jafari et al. (2022), Xue et al. al. (2020), Hu et al. (2020), Bui et al. (2020), Hu et al. (2021), Pei et al. (2023), Co-learning</td><td rowspan=\"8\">(2020) and Dong et al. (2022) Datatype</td><td rowspan=\"8\">Zhou et al. (2023) Fundus Genomic Ultrasound Hist Representation Challenge</td><td rowspan=\"8\">Fusion</td><td rowspan=\"8\">Zhou et al. (2023) Habib et al. (2020) (2021) and Cui et al. (2022) Chen et al. (2020), Li et al.</td><td rowspan=\"8\">Leroy et al. (2023) Translation Alignment Co-learning</td></tr><tr><td>(2023), Khosravi et al. (2022)</td></tr><tr><td>Sonsbeek and Worring (2020), Habib et al. (2020), Cui et al. (2020) (2022), Carbonell et al.</td></tr><tr><td>Chen et al. (2020) and Cui et al.</td></tr><tr><td>(2022)</td></tr><tr><td>Chauhan et al. (2020) Xiong et al. (</td></tr><tr><td></td></tr><tr><td>Sonsbeek and Worring (2 X-ray</td></tr><tr><td></td></tr><tr><td>Zhou et al. (2023)</td><td colspan=\"2\">(2020)</td></tr></table></body></html>  \\n\\nseparate modalities are joined together into a single vector form through methods such as aggregation, concatenation or summation. Joint representation is both a common and effective strategy for representation; however, a joint strategy such as concatenation is often constricted to serving in situations where both modalities are available at train- and test-time (one exception using Boltzmann Machines can be found in Srivastava and Salakhutdinov ( 2014 )). 
If a modality has the potential to be missing, a joint strategy such as aggregation via weighted means could be a better option (Li et al., 2021 ;Chen et al., 2020 ; Zhou et al., 2023 ; Cui et al., 2022 ). Using mathematical notation from Baltrusaitis et al. ( 2019 ), we can denote joint representations $x_{m}$ as the following:  \\n\\n$$\\nx_{m}=f(x_{1},...,x_{n})\\n$$  \\n\\nThis denotes that feature vectors $x_{i},i\\\\,=1...n$ are combined in some way through a function $f$ to create a new representation space $x_{m}$ . By the contrary, coordinated representations are represented as the following:  \\n\\n$$\\nf(x_{1})\\\\sim g(x_{2}),\\n$$  \\n\\nwhereby a function designed to create representations for one modality may be constrained (represented by $\\\\sim$ ) by a similar function from another modality, with the assumption that relationships between data points in the first modality should be relatively well-preserved in the second modality.  \\n\\nJointrepresentationstendtobethemostcommonapproach to representing two or more modalities together in a model because it is perhaps the most straightforward approach. For example, joining vectorized multimodal data together through concatenation before entering a model tends to be one of the most direct approaches to joint representation. Sonsbeek and Worring ( 2020 ), for example, chest x-rays are combined with text data from electronic health records in a vectorized form using a pretrained model first. Then, the vectors from each modality are sent individually through two attention-based blocks, then concatenated into a joint feature space to predict a possible cardiovascular disease and generate a free-text “impression” of the condition. Other joint representation models follow simpler methods, simply extracting baseline features from a pretrained model and concatenating them Daza et al. ( 2020 ); Yang et al. ( 2020 ).  
\\n\\nAlthough coordinated representations have traditionally tended to be more challenging to implement, the convenience of neural network architectural and loss adjustments have resulted in increased traction in publications embodying coordinated representations (Xing et al., 2022 ; Wang et al., 2023 ; Chauhan et al., 2020 ; Radford et al., 2021 ; Zhang et al., 2022 ; Bhalodia et al., 2021 ). One of the most notable of these in recent AI approaches is OpenAI’s Contrastive Language-Image Pre-Training (CLIP) model, which forms representations for OpenAI’s DALL ·E 2 (Radford et al., 2021 ; Ramesh et al., 2022 ) and uses a contrastive-learning approach to shape both image embeddings of entire images to match text embeddings of entire captions describing those images. The representations learned from CLIP were demonstrated to not only perform well in zero-shot image-to-text or text-to-image models, but also to produce representations that could outpace baseline supervised learning methods. In a biomedical context, similar models abound, including ConVIRT, a predecessor and forerunner for CLIP (Zhang et al., 2022 ), and related works (Bhalodia et al., 2021 ).  \\n\\nCoordinated approaches are especially useful in colearning. Chauhan et al. ( 2020 ), which employs a subset of co-learning called privileged information, the geometric forms of each modality are not joined into a single vector representation. Instead, network weights are encouraged to produce similar output vectors for each modality and ultimately the same classifications. This constraint warps the space of chest x-ray representations closer to the space of text representations, with the assumption that this coordinated strategy provides chest x-ray representations more useful information because of the text modality. For more on privileged information, see the Sect. 2.5 below.  
\\n\\nIn the biomedical sphere, where models are built to prioritize biologically- or clinically-relevant outcomes, quality of representations may often be overlooked or overshadowed by emphasis on optimization of prediction accuracy. However, there is conceptual value in building good multimodal representations. If models are constructed to ensure that similar concepts in different modalities also demonstrate cross-modal similarity, then there is greater confidence that an accurate model is understanding cross-modal relationships. While building good cross-modal representations for indexing images on the Internet like in the CLIP model is a digestible challenge, building similar cross-modal representations for medical data presents a far more formidable challenge due to data paucity. OpenAI’s proprietary WebTextImage dataset, used for CLIP, contains 400 million examples, a sample size that is as of yet unheard of for any kind of biomedical imaging data. Until such a dataset is released, bioinformaticians must often rely on the ability to leverage pretrained models and transfer learning strategies for optimal results amidst low resources to leverage big data for good representations on smaller data.',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718185899918,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 4,\n",
       "   'chunk_text': '# 2.2 Fusion\\nNext, we discuss challenges in multimodal fusion. This topic is a natural segue from the discussion of representation because many multimodal representations are subsequently fed into a discriminatory model. Multimodal fusion entails the utilization of methods to combine representations from more than one modality into a classification, regression, or segmentation model. According to Baltrusaitis et al. (2019 ), fusion models can be classified into two subcategories: model-agnostic and model-based approaches. The term “model-agnostic” refers to methods for multimodal fusion occurring either before or after the model execution and typically does not involve altering the prediction model itself. Model-agnostic approaches can further be delineated by the stage at which the fusion of modalities occurs, either early in the model (prior to output generation) or late in the model (such as ensemble models, where outputs from multiple models are combined). Additionally, hybrid models, incorporating a blend of both early and late fusion, have been proposed (Carbonell et al., 2023 ). In contrast, a modelbased approach entails special adjustments to the predictive model to ensure it handles each modality uniquely.  \\n\\nWhile model-agnostic methods remain pertinent as useful strategies for multimodal fusion, the overwhelming popularity of neural networks has led to a predominant shift towards model-based methods in recent years. These model-based methods involve innovative loss functions and architectures designed to handle each modality differently. One common model-based fusion strategy is multimodal multiple instance learning (MIL) , where multiple context vectors for each modality are generated and subsequently aggregated into a single representation leading to the output classification. 
The method for aggregation varies across studies, with attention-based approaches, emphasizing specific characteristics of each modality, being a common choice (Li et al., 2021 ; Chen et al., 2020 ; Zhou et al., 2023 ; Cui et al., 2022 ).  \\n\\nThe construction of a good model architecture is crucial; however, challenges associated with fusion are often highly contextual, and thus it is important to understand what kinds of data are being utilized in recent models and what problems they try to solve. Most multimodal models understandably incorporate MRI modalities, given that MR images are a natural multimodal medium. Consequently, studies incorporating MRI such as Azcona et al. ( 2020 ), which aims to classify Alzheimer’s Disease severity, and Zhou et al. (2020 ), predicting overall survival in brain tumor patients, exemplify the type of research often prevalent in multimodal image-based clinical application publications. Brain-based ML studies are also popular because of the wide availability of brain images and a strong interest in applying ML models in clinical neuroradiology. However, recent models encompass a myriad of other clinical scenarios predicting lung cancer presence (Daza et al., 2020 ), segmenting soft tissue sarcomas (Neubauer et al., 2020 ), classifying breast lesions (Habib et al., 2020 ), and predicting therapy response (Yang et al., 2020 ), among others, by amalgamating and cross-referencing modalities such as CT images (Daza et al., 2020 ; Neubauer et al., 2020 ), blood tests (Yang et al., 2020 ), electronic health record (EHR) data (Yang et al., 2020 ; Sonsbeek and Worring, 2020 ; Daza et al., 2020 ), mammography images (Habib et al., 2020 ), and ultrasound (Habib et al., 2020 ).  \\n\\nMultimodal fusion models are emerging as the gold standard for clinical-assisted interventions due to the recognition that diagnosis and prognosis in real-world clinical settings are often multimodal problems. However, these models are not without limitations. 
For one, standardization across equipment manufacturers or measurement protocols can affect model performance dramatically, and this issue becomes more pronounced as more modalities are incorporated into a model. Second, while fusion models attempt to mimic realworld clinical practice, they face practical challenges that can limit their utility. For instance, physicians may face various roadblocks to obtaining all model input variables due to a lack of permission from insurance companies to perform all needed tests or time constraints. These issues underscore challenges associated with missing modalities, and several studies have attempted to address this concern (Carbonell et al., 2023 ; Zhang et al., 2022 ; Cui et al., 2022 ; Wang et al., 2023 ; Liu et al., 2023 ). However, incorporating mechanisms to account for missing modalities in a model is not yet a common practice for most multimodal biomedical models.  \\n\\nLastly, many models are not configured to make predictions that adapt with additional variables. Most models necessitate all variables to be present at the time of operation, meaning that, even if all tests are conducted, the model can only make a decision once all test results have been obtained. In conclusion, in the dynamic and fast-paced environment of hospitals and other care centers, even accurate models may not be suitable for practical use, unless also coupled with mechanisms to handle missing data.',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718231250832,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 5,\n",
       "   'chunk_text': '# 2.3 Translation\\nIn multimodal translation, a model is devised to operate as a mapping entity facilitating the transformation from one modality to another. This involves the conversion of input contextual data, such as CT scans, into an alternative contextual data format, such as MRI scans. Before the rise of modern generative methods leveraging multimodal generative adversarial networks (GANs) or diffusion models to generate one modality from another, translation via dictionary-based methods was common, which typically involved a bimodal dictionary whereby a single entry would contain a key belonging to one modality and a corresponding value belonging to the other modality. Dictionary-based translation was uncommon in biomedical research but popular in NLP fields as a way to convert images into text and vice versa (Liao et al., 2022 ; Reed et al., 2016 ). The current ascendancy of generative models and the availability of associated coding packages have since catalyzed the growth of innovative translational studies applying generative approaches.  \\n\\nPresently, generative models encompass a broad spectrum of potential applications both within and beyond the biomedical domain. Outside the medical sphere, generative models find utility in NLP settings, particularly in text-toimage models like DALL ·E 2 and Midjourney (Liao et al., 2022 ; Ramesh et al., 2022 ; Oppenlaender, 2022 ). Additionally, they are employed in style transfer and other aesthetic computer vision techniques (Huang et al., 2021 ; Cao et al., 2018 ; Zhu et al., 2017 ; Liu et al., 2018 ; Palsson et al., 2018 ;Zhang and Wang, 2020 ). Within the biomedical realm, generative models have proven efficacious in creating virtual stains for unstained histopathological tissues which would typically undergo hemotoxylin/eosin staining (Lu et al., 2021 ). 
Furthermore, these models serve as prominent tools for sample generation (Tseng et al., 2017 ; Piacentino et al., 2021 ; Choi et al., 2017 ), particularly in scenarios with limited sample sizes (Chen et al., 2021 ). Despite the potential diversity of multimodal translation involving any two modalities, predominant translational efforts in the biomedical realm currently revolve around mapping one imaging modality to another, a paradigm recognized as image-to-image translation.  \\n\\nIn the contemporary landscape, the integration of simplistic generative models into a clinical context are declining in visibility, while methods employing specialized architectures tailored to the involved modalities are acknowledged for advancing the state-of-the-art in translational work. Within this context, two notable generative translation paradigms for biomedicine are explored: (1) medical image generation models, and (2) segmentation mask models. In the former, many studies attempt to form models that are bidirectional, whereby the intended output can be placed back as input and return an image similar to the original input image. Bui et al. ( 2020 ), this is resolved by generating deformation fields that map changes in the T1-weighted sequence modality of MRI to the T2-weighted sequence modality. Hu et al. ( 2020 ), separate forward and backward training processes are defined whereby an encoder representing PET images is utilized to understand the underlying distribution of that modality, allowing for more realistic synthetic generated images from MRI. In one unidirectional example, Shin et al. ( 2020 ) modifies a pix2pix conditional GAN network to allow Alzheimer’s disease classification to influence synthetic PET image generation. In another interesting example, Takagi and Nishimoto ( 2023 ) use functional MRI (fMRI) scans and diffusion models to attempt to recreate images of what their subjects had seen. 
Similarly, diffusion models and magnetoencephalography (MEG) are utilized by Meta for real-time prediction from brain activity of what patients had seen visually (Benchetrit et al., 2023 ).  \\n\\nIn the second potential application, image segmentation models in multimodal image-to-image translation must handle additional challenges, creating both a way to generate the output modality as well as a way to segment it. Jiang and Veeraraghavan ( 2020 ), a generative model converts CT to MRI segmentation. In a reverse problem to image segmentation, Guo et al. ( 2020 ) attempts to synthesize multimodal MRI examples of lesions with only a binary lesion mask and a multimodal MRI Atlas. In this study, six CNN-based discriminators are utilized to ensure the authentic appearance of background, brain, and lesion, respectively, in synthesized images.  \\n\\nMultimodal translation still remains an exciting but formidable challenge. In NLP and beyond, there have been remarkable successes observed in new image generation within text-to-image models beyond the biomedical sphere. However, the adoption of translation models in biomedical work is evolving at a more measured pace, with applications extending beyond demonstrative feasibility to practical utility remaining limited. Arguments in favor of biomedical translation models are predominantly centered around sample generation for datasets with limited sizes, as the generated medical images must adhere to stringent accuracy requirements. Similar to other challenges in multimodal research, translation models would greatly benefit from training on more expansive and diverse datasets. However, with the increasing digitization of medical records and a refined understanding of de-identification protocols and data sharing rights, the evolution of this field holds considerable promise.',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718276339602,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 6,\n",
       "   'chunk_text': '# 2.4 Alignment\\nMultimodal alignment involves aligning two related modalities, often in either a spatial or temporal way. Multimodal alignment can be conducted either explicitly as a direct end goal, or implicitly , as a means to the end goal, which could be translation or classification of an input. One example of explicit alignment in a biomedical context is image registration. Leroy et al. ( 2023 ) highlights one approach to multimodal image registration, where histopathology slides are aligned to their $(x,y,z)$ coordinates in a threedimensional CT volume. Another is in Chen et al. ( 2023 ), where surgical video was aligned to a text description of what is happening in the video. On the other hand, an example of multimodal implicit alignment could be the temporal alignment of multiple clinical tests to understand a patients progress over time. Such an analysis was conducted in Yang et al. ( 2020 ), where the authors built a customized multi-layer perceptron (MLP) called SimTA to predict response to therapy intervention at a future time step based on results from previous tests and interventions.  \\n\\nLiterature surrounding alignment has increased since the rise of attention-based models in 2016. The concept of “attention,” which relates to aligning representations in a way that is contextually relevant, is a unimodal alignment paradigm with origins in machine translation and NLP (Bahdanau et al., 2015 ). An example use of attention in NLP could be models which try to learn, based on order and word choice of an input sentence, where the subject of the sentence is so that the response can address the input topic. In imaging, attention can be used to highlight important parts of an image that are most likely to contribute to a class prediction. Vaswani et al. 
(2017 ), introduced a more sophisticated attention network, named transformers, an encoder-decoder-style architecture based on repeated projection heads where attention learning takes place. Transformers and attention were originally applied to natural language (Vaswani et al., 2017 ; Bahdanau et al., 2015 ; Devlin et al., 2019 ) but have since been applied to images (Parmar et al., 2018 ; Dosovitskiy et al., 2021 ), including histopathology slides (Lu et al., 2021 ; Chen et al., 2020 ) and protein prediction (Tunyasuvunakool et al., 2021 ). Multimodal transformers were introduced in 2019, also developed for the natural language community (Tsai et al., 2019 ). While these multimodal transformers do not contain the same encoder-decoder structure of a traditional transformer architecture, they are hallmarked by crossmodal attention heads, where one modality’s sequences intermingle with another modality’s sequences.  \\n\\nAlthough typical transformers themselves are not multimodal, they often constitute in multimodal models. The SimTA network mentioned above borrowed the positional encoding property of transformers to align multimodal inputs in time to predict therapy response (Yang et al., 2020 ). Many models taking advantage of visual transformers (ViT) have also utilized pretrained transformers trained on images for multimodal fusion models. In both the TransBTS (Wang et al., 2021 ) and mmFormer models (Zhang et al., 2022 ), a transformer is utilized on a vector composed of an amalgamation of information from multiple modalities of MRI, which may imply that the transformer attention heads here are aligning information from multiple modalities represented via aggregate latent vectors. The ultimate function of transformers is a form of implicit alignment, and it can be assumed here that this alignment is multimodal.  \\n\\nTransformer models have brought a new and largely successful approach to alignment, sparking widespread interest in their applications in biomedical use. 
Transformers for NLP have also engendered new interest in Large Language Models (LLMs), which are already being applied to biomedical contexts (Tinn et al., 2023 ) and probing new questions about its potential use as a knowledge base for biomedical questions (Sung et al., 2021 ).\\n\\n# 2.5 Co-learning\\nIn this last section exploring recent research in multimodal machine learning, the area of co-learning is examined, a field which has recently garnered a strong momentum in both unimodal and multimodal domains. In multimodal colearning, knowledge learned from one modality is often used to assist learning of a second modality. This first modality which transfers knowledge is often leveraged only at traintime but is not required at test-time. Co-learning is classified in Baltrusaitis et al. ( 2019 ) as either parallel or non-parallel. In parallel co-learning, paired samples of modalities which share the same instance are fed into a co-learning model. By contrast, in non-parallel co-learning, both modalities are included in a model but are not required to be paired.  \\n\\nWhile co-learning can embody a variety of topics such as conceptual grounding and zero-shot learning, this work focuses on the use of transfer learning in biomedicine. In multimodal transfer learning , a model trained on a higher quality or more plentiful modality is employed to assist in the training of a model designed for a second modality which is often noisier or smaller in sample size. Transfer learning can be conducted in both parallel and non-parallel paradigms. This work focuses on one parallel form of transfer learning called privileged learning, and one non-parallel form of transfer learning called domain adaptation. A visual representation of these approaches be seen in Fig. 4 .',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718321428372,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 7,\n",
       "   'chunk_text': '# 2.5.1 Privileged Learning\\nPrivileged learning originates from the mathematician Vladmir Vapnik and his ideas of knowledge transfer with the support vector machine for privileged learning $\\\\scriptstyle(\\\\mathrm{SVM}+)$ model (Vapnik and Vashist, 2009 ). The concept of privileged learning introduces the idea that predictions for a lowsignal, low-cost modality can be assisted by incorporating a high-signal, high-cost modality (privileged information) in training only, while at test-time only the low-cost modality is needed. Vapnik and Vashist ( 2009 ), Vapnik illustrates this concept through the analogy of a teacher (privileged information) distilling knowledge to a student (low-cost modality) before the student takes a test. Although a useful concept, the field is relatively under-explored compared to other areas of co-learning. One challenge to applying privileged learning models was that Vapnik’s $\\\\mathrm{SVM+}$ model was one of few available before the widespread use of neural networks. Furthermore, it demands that the modality deemed “privileged” must confer high accuracy on its own in order to ensure that its contribution to the model is positive. Since then, neural networks have encouraged newer renditions of privileged information models that allow more flexibility of use (Lambert et al., 2018 ; Shaikh et al., 2020 ; Sabeti et al., 2021 ).  \\n\\nRecently, privileged learning has emerged as a growing subset of biomedical literature, and understandably so. Many multimodal models today require health care professionals to gather a slew of patient information and are not trained to handle missing data. Therefore, the ability to minimize the number of required input data while still utilizing the predictive power of multiple modalities can be useful in real-world clinical settings. Hu et al. 
( 2020 ) for example, the authors attempt to train a segmentation network where at train-time the “teacher network” contains four MR image modalities, but at test-time the “student network” contains only T1-weighted images, the standard modality used in preoperative neurosurgery and radiology. Chauhan et al. ( 2020 ), chest x-rays and written text from their respective radiology reports are used to train a model where only chest x-rays are available at test-time.  \\n\\n  \\nFig. 4 Two types of transfer learning described in this work are privileged learning (top) and domain adaptation (bottom). In privileged learning, a plentiful set consisting of data which is normally of low cost but also low signal-to-noise ratio is available in both training and testing, while a limited gold-standard quality set is used for training only. In this example, the plentiful set is used to train the target model, while the limited set constrains the model parameters to increase the   \\nmodel’s ability to associate the low-cost modality with the ground truth. In domain adaptation, there is a target dataset which consists of a few samples and a source dataset consisting of plenty of samples. If the target data is too small to build a reliable model in training, source data can be augmented to make the model more robust. Else, the target model could be trained with few examples, while a second source model is used to help make the target model more generalizable  \\n\\nIn privileged models based on traditional approaches (before deep neural networks), privileged information can be embedded in the model either through an alteration of allowable error (“slack variables” from $\\\\mathrm{SVM+}$ ) (Vapnik and Vashist, 2009 ), or through decision trees constructed with non-privileged features to mimic the discriminative ability of privileged features (Random Forest+) (Warner et al., 2022 ;Moradi et al., 2016 ). 
In a deep learning model, privileged learning is often achieved through the use of additional loss functions which attempt to constrain latent and output vectors from the non-privileged modality to mimic those from the combined privileged and non-privileged models (Hu et al., 2020 ; Xing et al., 2022 ). For example, in Chauhan et al. (2020 ), encoders for each modality are compared and cross entropy loss is calculated for each modality separately. The sum of these allows the chest x-ray network to freely train for only the chest $\\\\mathbf{X}$ -ray modality while being constrained through the overall loss function to borrow encoding methods from the text network, which also strives to build an accurate model.  \\n\\nWhile privileged learning models can be applied where data is missing, users should heed caution when applying models in situations where there is systematic bias in reporting. Those who train privileged models without considering subject matter may inadvertently be choosing to include all their complete data in training and their incomplete data in testing. However, in clinical scenarios, data are often incomplete because a patient either did not qualify for a test (perhaps their condition was seen as not “dire enough” to warrant a test) or their situation was too serious to require a test (for example, a patient in septic shock may not pause to undergo a chest x-ray because they are in the middle of a medical emergency). Therefore, while applying data to highly complex models is a common approach in computer science, the context of the data and potential underlying biases need to be considered first to ensure a practical and well-developed model.',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718365992854,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 8,\n",
       "   'chunk_text': '# 2.5.2 Domain Adaptation\\nDomain adaptation has been shown to be useful in biomedical data science applications where a provided dataset may be too small or costly to utilize for more advanced methods such as deep learning, but where a somewhat similar (albeit larger) dataset can be trained by such methods. The smaller dataset for which we want to train the model is called the “target” dataset and the larger dataset which will be used to assist the model with the learning task and provide better contextualization is called the “source” dataset. Domain adaptation strategies are often tailored to single modalities such as camera imaging or MRI, where measurements of an observed variable differ based on an instrument’s postprocessing techniques or acquisition parameters (Xiong et al., 2020 ; Varsavsky et al., 2020 ; Yang et al., 2020 ). However, the distinct characteristics arising from disparate instruments or acquisition settings can lead to considerable shifts in data distribution and feature representations, mirroring the challenges faced in true multimodal contexts. Therefore, the discussion of uni-modal domain adaptation is a relevant starting point for multimodal domain adaptation, as it covers approaches to mitigate significant deviations within data that may seem similar but are represented differently. Additionally, understanding how to mitigate the impact of such variations helps one to understand ways to construct multimodal machine learning systems that confront similar challenges. We also discuss relevant multimodal domain adaptation approaches in biomedicine, which have typically consisted of applying CT images as a source domain to train an MRI target model or vice versa (Chiou et al., 2020 ; Xue et al., 2020 ; Pei et al., 2023 ; Jafari et al., 2022 ; Dong et al., 2022 ).  
\\n\\nOne way to train a model to adapt to different domains is through augmentation of the input data, which “generalizes” the model to interpret outside of the domain of the original data. Xiong et al. ( 2020 ), a data augmentation framework for fundus images in diabetic retinopathy (DR) is proposed to offset the domain differences of utilizing different cameras. The authors show that subtracting local average color, blurring, adaptive local contrast enhancement, and a specialized principal component analysis (PCA) strategy can increase both $R^{2}$ values for age prediction and DR classification area under the receiver operating curve (AUROC) on test sets where either some domain information is known a priori and also where no information is known, respectively. In another method which attempts to augment the source domain into more examples in the target style, Chiou et al. ( 2020 ) split the source image into latent content and style vectors, using the content vectors in a style-transfer model reminiscent of cycleGAN to feed as examples with the target domain into a segmentation network (Zhu et al., 2017 ). In other applications, data augmentation for domain generalization may be executed utilizing simpler affine transformations (Varsavsky et al., 2020 ). This demonstrates the utility of data augmentation strategies in more broadly defining decision boundaries where target domains differ from the source.  \\n\\nA second strategy for domain adaptation involves constraining neural network functions trained on a target domain by creating loss functions which require alignment with a source domain model. Varsavsky et al. ( 2020 ), a framework for adapting segmentation models at test-time is proposed, whereby an adversarial loss trains a target-based U-Net to be as similar to a source-based U-Net as possible. Then a paired-consistency loss with adversarial examples is utilized to fine-tune the decision boundary to include morphologically similar data points. 
In a specificially multimodal segmentation-based model, Xue et al. ( 2020 ) attempts to create two side-by-side networks, a segmenter and an edge generator, which both encourage the source and target output to be as similar as possible to each other. In the final loss function, the edge generator is used to constrain the segmenter in such a way as to promote better edge consistency in the target domain. In yet another, simpler example, domain adaptation to a target domain is performed in Hu et al. ( 2021 ) by taking a network trained on the source domain and simply adjusting the parameters of the batch normalization layer.  \\n\\nDomain adaptation in biomedicine can be a common problem where instrument models or parameters change. Among multimodal co-learning methods, most networks are constructed as segmentation networks for MRI and CT because they are similar imaging domains, although measuring different things. While CT carries distinct meaning in its pixels (measured in Hounsfield Units), MRI pixel intensities are not standardized and usually require normalization, which could pose challenges to this multimodal problem. Additionally, MRI carries much more detail than CT scans, which necessitates the model to understand contextual boundaries of objects much more than a unimodal case with only CT or MRI.',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718411343768,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 9,\n",
       "   'chunk_text': '# 3 Discussion\\nThe rapidly evolving landscape of artificial intelligence (AI) both within the biomedical field and beyond has posed a substantial challenge in composing this survey. Our aim is to provide the reader with a comprehensive overview of the challenges and contemporary approaches to multimodal machine learning in image-based, clinically relevant biomedicine. However, it is essential to acknowledge that our endeavor cannot be fully comprehensive due to the dynamic nature of the field and the sheer volume of emerging literature within the biomedical domain and its periphery. This robust growth has led to a race among industry and research institutions to integrate the latest cutting-edge models into the healthcare sector, with a particular emphasis on the introduction of “large language models” (LLMs). In recent years, there has been an emergence of market-level insights into the future of healthcare and machine learning, as exemplified by the incorporation of machine learning models into wearable devices such as the Apple Watch and Fitbit devices for the detection of atrial fibrillation (Perino et al., 2021 ; Lubitz et al., 2022 ). This begs the question: where does this transformative journey lead us?  \\n\\nHealthcare professionals and physicians already embrace the concept of multimodal cognitive models in their diagnostic and prognostic practices, signaling that such computer models based on multimodal frameworks are likely to endure within the biomedical landscape. However, for these models to be effectively integrated into clinical settings, they must exhibit flexibility that aligns with the clinical environment. If the ultimate goal is to seamlessly incorporate these AI advancements into clinical practice, a fundamental question arises: how can these models be practically implemented on-site? 
Presently, most available software tools for clinicians are intended as auxiliary aids, but healthcare professionals have voiced concerns regarding the potential for increased computational workload, alert fatigue, and the limitations imposed by Electronic Health Record (EHR) interfaces (Ruiter et al., 2015 ; Ancker et al., 2017 ). Therefore, it is paramount to ensure that any additional software introduced into clinical settings serves as an asset rather than a hindrance.  \\n\\nAnother pertinent issue emerging from these discussions pertains to the dynamics between clinical decision support systems (CDSS) and healthcare providers. What occurs when a computer-generated recommendation contradicts a physician’s judgment? This dilemma is not new, as evidenced by a classic case recounted by Evans et al. ( 1998 ), where physicians were granted the choice to either follow or disregard a CDSS for antibiotic prescription. Intriguingly, the group provided with the choice exhibited suboptimal performance compared to both the physician-only and computer-only groups. Consequently, it is unsurprising that some healthcare professionals maintain a cautious approach to computer decision support systems (Adamson and Welch, 2019 ; Silcox et al., 2020 ). Questions arise regarding the accountability of physicians if they ignore a correct computer-generated decision and the responsibility of software developers if a physician follows an erroneous computer-generated recommendation.  \\n\\nA pivotal ingredient notably under-represented in many CDSS models, which could help alleviate discrepancies between computer-generated and human decisions, is the incorporation of uncertainty quantification, grounded calibration, interpretability and explainability. 
These factors have been discussed in previous literature, underscoring the critical role of explainability in ensuring the long-term success of CDSS-related endeavors (Reddy, 2022 ; Khosravi et al., 2022 ; Kwon et al., 2020 ; Abdar et al., 2021 ).  \\n\\nThe domain of multimodal machine learning for medically oriented image-based clinical support has garnered increasing attention in recent years. This interest has been stimulated by advances in computer science architecture and computing hardware, the availability of vast and publicly accessible data, innovative model architectures tailored for limited datasets, and the growing demand for applications in clinical and biomedical contexts. Recent studies have showcased the ability to generate synthetic images in one modality based on another (as outlined in Sect. 2.3 ), align multiple modalities (Sect. 2.4 ), and transfer latent features from one modality to train another (Sect. 2.5 ), among other advancements. These developments offer a promising outlook for a field that is still relatively new. However, it is also imperative to remain vigilant regarding the prevention of data biases and under-representation in ML models to maximize the potential of these technologies.  \\n\\nDespite these promising developments, the field faces significant hurdles, notably the lack of readily available “big data” in the medical domain. For instance, the routine digitization of histopathology slides remains a challenging goal in many healthcare facilities. Data sharing among medical institutions is fraught with challenges around appropriate procedures for the responsible sharing of patient data under institutional, national and international patient privacy regulations.  \\n\\nAdvancing the field will likely entail overcoming these hurdles, ensuring more extensive sharing of de-identified data from research publications and greater participation in establishment of standardized public repositories for data. 
Dissemination of both code and pretrained model weights would also enable greater knowledge-sharing and repeatability. Models that incorporate uncertainty quantification, explainability, and strategies to account for missing data are particularly advantageous. For more guidance on building appropriate multimodal AI models in healthcare, one can refer to the World Health Organization’s new ethics and governance guidelines for large multimodal models (World Health Organization, 2024 ).  \\n\\nIn conclusion, the field of multimodal machine learning in biomedicine has experienced rapid growth in each of its challenge areas of representation, fusion, translation, alignment, and co-learning. Given the recent advancements in deep learning models, escalating interest in multimodality, and the necessity for multimodal applications in healthcare, itislikelythatthefieldwillcontinuetomatureandbroadenits clinical applications. In this ever-evolving intersection of AI and healthcare, the imperative for responsible innovation resonates strongly. The future of multimodal machine learning in the biomedical sphere presents immense potential but also mandates a dedication to ethical principles encompassing data privacy, accountability, and transparent collaboration between human professionals and AI systems. As we navigate this transformative journey, the collective effort, ethical stewardship, and adherence to best practices will ensure the realization of the benefits of AI and multimodal machine learning, making healthcare more efficient, accurate, and accessible, all while safeguarding the well-being of patients and upholding the procedural and ethical standards of clinical practice.  \\n\\nAuthor Contributions E.W. contributed the main writing of the paper. This paper concept was formulated by W.H., T.S.M., O.G., and J.L., A.R., W.H., T.S.M., C.E.K., and A.R. 
contributed ideas and direction for the writing and assisted in the proofreading and selection of the concepts and papers covered.  \\n\\nFunding E.W and A.R are greateful for support from NIH grant R37CA214955-01A1. All authors are grateful to support from the AMIA Biomedical Image Informatics Working group.  \\n\\nData Availability No data outside of those referenced has been used in this survey. Key papers have been summarized in Table 1 .',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454847718454859674,\n",
       "   'paper_id': '65499d88939a5f4082be99ae',\n",
       "   'paper_title': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects',\n",
       "   'chunk_id': 10,\n",
       "   'chunk_text': '# Declarations\\nConflict of Interest The Authors declare no competing financial interests but the following competing non-financial interests: A.R. serves as a member for Voxel Analytics, LLC. C.E.K.’s institution receives salary support for service as editor of Radiology: Artificial Intelligence.  \\n\\nOpen Access This article is licensed under a Creative Commons Attribution 4.0 International License, which permits use, sharing, adaptation, distribution and reproduction in any medium or format, as long as you give appropriate credit to the original author(s) and the source, provide a link to the Creative Commons licence, and indicate if changes were made. The images or other third party material in this article are included in the article’s Creative Commons licence, unless indicated otherwise in a credit line to the material. If material is not included in the article’s Creative Commons licence and your intended use is not permitted by statutory regulation or exceeds the permitteduse,youwillneedtoobtainpermissiondirectlyfromthecopyright holder. To view a copy of this licence, visit http://creativecomm ons.org/licenses/by/4.0/ .',\n",
       "   'original_filename': 'Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db',\n",
       "   'year': 2024}],\n",
       " [{'id': 454849661261518532,\n",
       "   'paper_id': '65fc055f13fb2c6cf6df27ae',\n",
       "   'paper_title': 'All in One Framework for Multimodal Re-identification in the Wild',\n",
       "   'chunk_id': 0,\n",
       "   'chunk_text': '# All in One Framework for Multimodal Re-identification in the Wild\\nHe Li 1 Mang $\\\\mathrm{Ye^{1*}}$ Ming Zhang 2 Bo Du 1 1 National Engineering Research Center for Multimedia Software, Institute of Artificial Intelligence, School of Computer Science, Hubei Luojia Laboratory,Wuhan University, Wuhan, China, 2 Guangzhou Urban Planning Design Survey Research Institute, Guangzhou, China Challenges in ReID\\n\\n# Abstract\\nIn Re-identification (ReID), recent advancements yield noteworthy progress in both unimodal and cross-modal retrieval tasks. However, the challenge persists in developing a unified framework that could effectively handle varying multimodal data, including RGB, infrared, sketches, and textual information. Additionally, the emergence of large-scale models shows promising performance in various vision tasks but the foundation model in ReID is still blank. In response to these challenges, a novel multimodal learning paradigm for ReID is introduced, referred to as All-in-One (AIO), which harnesses a frozen pre-trained big model as an encoder, enabling effective multimodal retrieval without additional fine-tuning. The diverse multimodal data in AIO are seamlessly tokenized into a unified space, allowing the modality-shared frozen encoder to extract identity-consistent features comprehensively across all modalities. Furthermore, a meticulously crafted ensemble of cross-modality heads is designed to guide the learning trajectory. AIO is the first framework to perform allin-one ReID, encompassing four commonly used modalities. Experiments on cross-modal and multimodal ReID reveal that AIO not only adeptly handles various modal data but also excels in challenging contexts, showcasing exceptional performance in zero-shot and domain generalization scenarios. Code will be available at: https: //github.com/lihe404/AIO .  \\n\\n  \\nFigure 1. Illustration of the proposed AIO and existing methods. 
(a) Existing ReID methods [ 7 ,68 ,86 ] independently learn the cross-modal ReID models, incapable of handling the uncertain input modalities in real-world scenarios. (b) Our proposed AIO framework exhibits the capability to proficiently manage diverse combinations of input modalities, thus addressing the inherent uncertainties prevalent in practical deployment scenarios.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454849661321287366,\n",
       "   'paper_id': '65fc055f13fb2c6cf6df27ae',\n",
       "   'paper_title': 'All in One Framework for Multimodal Re-identification in the Wild',\n",
       "   'chunk_id': 1,\n",
       "   'chunk_text': '# 1. Introduction\\nPerson Re-identification (ReID) aims to retrieve a target person captured by multiple non-overlapping cameras [ 69 ]. It is widely used in intelligent surveillance, security, and many other fields. ReID has been deeply studied in recent years and achieves human-level performance in both unimodal and cross-modal retrieval tasks [ 1 ,21 ,68 ,77 ].  \\n\\nExisting works are capable of retrieving between RGB images or leveraging different modalities of the query ( e.g .infrared (IR), sketch, or text) to find the person in RGB images [ 13 ,22 ,23 ,64 ,72 ]. However, RGB images are susceptible to environmental light fluctuations, while IR and sketch images lack vital color information crucial for ReID tasks. The adage “ a picture is worth a thousand words ” underscores the ease of accessing textual information, though it falls short in providing intricate visual details [ 51 ]. Moreover, as shown in Fig. 1 (a) and Tab. 1 , existing cross-modal methods are confined to specific paired modalities and models during training, rendering them unable to handle diverse input modalities effectively. Consequently, the generalizability of these methods to unseen modalities is severely hampered, a significant hurdle given the uncertainty of the modality in real-world query scenarios. This lack of adaptability across modalities severely constrains the practical applicability of existing methods in a practical real-world deployment with uncertain testing environments. Thus, 1) How to improve the generalizability of modality is a significant challenge?  \\n\\nTable 1. Comparison of AIO and existing methods on cross/multi-modality retrieval. The number in (·)in the “Multi” column indicates the number of support modalities at inference time.   
\\n\\n\\n<html><body><table><tr><td>Method</td><td>RGB</td><td>IR</td><td>Sketch</td><td>Text</td><td>Multi</td></tr><tr><td>NFormer [55] r [32] DC-Former AGW [69] [65] DART</td><td></td><td>× × √</td><td>× X × × √</td><td>× × × ×</td><td>X</td></tr><tr><td>Gui et al. [17] SketchTrans [6] APTM [66]</td><td></td><td></td><td>√ X ×</td><td>√</td><td>×</td></tr><tr><td>BiLMa a[13] TriReID [75]</td><td></td><td></td><td>√</td><td>√</td><td></td></tr><tr><td>UNIReID [7] AIO</td><td></td><td></td><td></td><td></td><td>(3) (3) (4)</td></tr></table></body></html>  \\n\\nMeanwhile, in real-world scenarios, individuals of interest frequently encounter unknown environments that are not learned during training, i.e ., zero-shot ReID in the wild. Existing methods explore domain generalizability based on a single modality, which fails to handle multi-modal zero-shot retrieval. Recently, foundation large models have shown their power in diverse language and vision tasks. Pioneering models such as CLIP [ 48 ] and CoCa [ 73 ], exemplify the prowess of large-scale pre-trained foundation models as robust zero-shot performers. This characteristic holds significant relevance for ReID tasks. Despite the existence of several large-scale ReID pre-trained models [ 11 ,41 ,66 ], their zero-shot performance falls short of expectations. Typical down-stream fine-tuning or training strategies would be too resource-demanding in a new challenging scenarios, e.g ., data collection and annotation. Moreover, the cost of training a large-scale foundation model is too high to afford for most researchers and small companies. Thus, 2) Is there a straightforward method to utilize extensive pretrained foundational models for improving zero-shot performance in ReID with uncertain modalities?  \\n\\nTo address the aforementioned issues, we introduce an innovative All-in-One (AIO) framework to tackle the challenges inherent in zero-shot multimodal ReID. As shown in Fig. 
1 (b), the key idea of our work is to explore the potential of leveraging transformer-based foundation models to address uncertain multimodal retrieval, enhancing zero-shot ability in multimodal ReID, e.g . any combination of RGB, IR, sketch, text, or simple cross-modal retrieval. AIO represents an experimental effort, being the first framework capable of simultaneously accommodating all four commonly used modalities in different ReID tasks concurrently.  \\n\\nIn order to achieve the above goal, AIO firstly designs a lightweight multimodal tokenizer to unify diverse data. It is followed by a frozen foundation model that serves as a shared feature encoder, extracting a generalized semantic representation across all modalities and improving zero-shot performance. Then, to guide cross-modal and multimodal feature learning, AIO proposes several crossmodal heads which contain: a) A Conventional Classification Head is utilized as the foundation of the learning guidance, learning identity-invariant representations; $b_{.}$ ) Vision Guided Masked Attribute Modeling is introduced to learn fine-grained features and build a relationship between text and images; c) Multimodal Feature Binding is utilized to close features of diverse modalities together.  \\n\\nFurthermore, the acquisition of multimodal data in realworld scenarios poses considerable challenges, particularly concerning IR and Sketch images. The shortage of IR cameras and the substantial human labor involved in sketch drawing contribute to this difficulty. Existing multimodal learning methods [ 47 ,57 ] demand paired multimodal, a pre-requisite not consistently met in realistic environments. In addressing the absence of certain modalities in multimodal learning, except the proposed Multimodal Feature Binding, integrates synthetic augmentation strategies, CA [72 ] and Lineart [ 53 ], to generate synthetic IR and Sketch images respectively. 
CA and Lineart have been shown to effectively diminish the domain gap between RGB-IR and RGB-Sketch modalities. Their utility extends to acting as a bridge that connects feature representations of the same target across diverse modalities, thereby facilitating the reduction of the modality gap [ 54 ,72 ].  \\n\\nComprehensive experiments across various zero-shot cross-modal and multimodal ReID scenarios, involving all four modalities, are conducted to evaluate the performance of the proposed framework. We also explore different foundation models and multimodal input data combinations to assess the versatility of AIO. The proposed framework demonstrates remarkable performance on multimodal ReID tasks and competitive performance on cross-modal ReID tasks without additional fine-tuning, highlighting its potential as a robust zero-shot solution for complex multimodal ReID tasks. In summary, our contributions are three-fold:  \\n\\n• We identify a critical limitation in existing cross-modal ReID that they lack generalizability to novel modalities, coupled with poor zero-shot performance. • We design a novel multimodal ReID framework, which innovatively integrates a pre-trained foundation model  \\n\\nand a multimodal tokenizer into ReID tasks, complemented a missing modality synthesis strategy, and three cross-modal heads to learn a unified multimodal model. • We perform extensive and comprehensive experiments to demonstrate that our AIO framework is capable of handling uncertain input modalities for ReID tasks, achieving competitive performance on zero-shot cross-modal and multi-modal ReID tasks.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454849661386036936,\n",
       "   'paper_id': '65fc055f13fb2c6cf6df27ae',\n",
       "   'paper_title': 'All in One Framework for Multimodal Re-identification in the Wild',\n",
       "   'chunk_id': 2,\n",
       "   'chunk_text': '# 2. Related Work\\n\\n# 2.1. Cross-modal ReID\\nPerson Re-identification (ReID) can be classified into single-modal [ 18 ,27 ,40 ,79 ] ReID and cross-modal ReID [7 ,62 ]. Specifically, cross-modal ReID considers special cases in which RGB images of the target are not available but non-RGB modalities, such as infrared (IR) [63 ,65 ,68 ,71 ], sketch [ 6 ,17 ], or description of the person [ 15 ,31 ,51 ,66 ,86 ] could be leveraged to enlarge the application area of ReID technology.  \\n\\nIn real-world scenarios, the RGB image of the target may not be directly available. On the one hand, the crime often happens at night, when RGB cameras cannot capture highquality images for ReID but IR cameras could give relatively good images of the person [ 69 ]. Thus, adopting IR images for target retrieval enters the picture. Zhang et al .[77 ] proposes a feature-level modality compensation network to compensate for the missing modality-specific information at the feature level to help the model learn discriminative features. Wu et al . [ 61 ] leverage unlabeled data and propose a progressive graph matching method to learn a relationship between IR and RGB images to alleviate the high cost of annotation. On the other hand, the natural language descriptions of the witness or the sketch drawn based on the textual descriptions are easy to access. Pang et al . [ 46 ]first design an adversarial learning method to learn domaininvariant features cross sketch and RGB. Zhai et al . [ 75 ]introduce a multimodal method that combines both sketch and text as queries for retrieval.  \\n\\nHowever, in the real world, the modality of the given target information is uncertain. The aforementioned works could only handle exactly two modalities, which hinders the applicability of these methods. To alleviate this problem, Chen et al . 
[ 7 ] proposes a modality-agnostic retrieval method that leverages RGB, sketch, and text modalities to learn modality-specific features and fuse them based on different uni/multimodal tasks. The proposed method could handle any combination of three learned modalities, expanding usage scenarios and reducing limitations. Nevertheless, it does not take IR into account and requires a complex design that lacks scalability. Different from that, the architecture of the proposed AIO is simple and it is easy to be extended to more modalities.\\n\\n# 2.2. Multimodal Learning\\nMultimodal learning methods aim to utilize the complementary properties of various modalities to learn the semantics of a task [ 34 ]. Recently, multimodal transformers [ 14 ,24 ,29 ] have emerged as unified models that fuse different modality inputs with token concatenation rather than extracting modality-specific and cross-modality representations. However, most multimodal learning methods [3 ,47 ,57 ] are designed based on the assumption of the completeness of modality for training or inference, which is not always held in the real world. To face this challenge, some researchers [ 4 ,25 ,45 ,56 ] explore building multimodal methods that could handle missing modalities. ImageBind [ 16 ] projects all features of different modalities into the same feature space and leverages contrastive learning to align all modalities to a based modality. SMIL [42 ] proposes to estimate the latent features of the missing modality via Bayesian Meta-Learning. GCNet [ 33 ] introduces graph neural network-based modules to capture temporal and speaker dependencies and jointly optimize classification and reconstruction tasks. Similar to previous work, the proposed AIO aspires to project features from diverse modalities into a unified feature embedding. Notably, we capitalize on the inherent capabilities of the transformer, adept at handling variable input lengths. 
This strategy enables the model to seamlessly accommodate inputs originating from any combination of modalities, enhancing the flexibility and adaptability of the AIO framework.\\n\\n# 2.3. Foundation Model\\nFoundation models are designed to be adapted to various downstream tasks by pre-training on broad data at scale [2 ]. The efficacy of large-scale pre-trained models is evident in their capacity to enhance data encoding and elevate the performance of downstream tasks [ 20 ,74 ]. Recent investigations [ 43 ,49 ,50 ,67 ] reveal the notable emergent ability exhibited by most large-scale pre-trained foundation models, particularly in the context of robust zeroshot performance. CLIP [ 48 ] focuses on multimodal contrastive learning on noisy web image-text pairs to learn aligned image and text representation. Impressively, CLIP achieves accuracy comparable to the original ResNet-50 on ImageNet zero-shot, without exposure to any samples from ImageNet. DALL that autoregressively models sufficient and large-scale text ·E [ 49 ] introduces a simple approach and image tokens and demonstrates commendable zero-shot performance when compared to preceding domain-specific models. Nonetheless, the foundation model within ReID remains in a nascent state. The suboptimal zero-shot performance of extant large-scale pre-trained models persists due to challenges in data acquisition. Motivated by the impressive zero-shot capabilities exhibited by foundation models, the proposed AIO framework strategically employs frozen pre-trained large-scale foundation models as feature extractors, which aims to imbue our framework with the ability to learn generalized semantics from a diverse range of modalities, thereby enhancing its zero-shot performance.  \\n\\n  \\nFigure 2. The schematic of the proposed AIO framework. VA: Vision Guided Masked Attribute Modeling head, FB: Feature Binding head, CE: Classification head. 
Our framework mainly contains three parts: I) a learnable multimodal tokenizer to project diverse modalities into a unified embedding, II) a frozen foundation modal to extract complementary cross-modal representations, and III) several cross-modal heads used to dig cross-modality relationships. In order to alleviate the missing modality problem, we also leverage Channel Augmentation [72 ] and Lineart [ 53 ] to synthesize IR and sketch images that are missing.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454849661445805770,\n",
       "   'paper_id': '65fc055f13fb2c6cf6df27ae',\n",
       "   'paper_title': 'All in One Framework for Multimodal Re-identification in the Wild',\n",
       "   'chunk_id': 3,\n",
       "   'chunk_text': '# 3. All in One Framework\\nIn this section, we describe the proposed AIO framework in detail. AIO exhibits the capability to adeptly handle uncertain multimodal data, encompassing RGB, IR, Sketch, and Text modalities. To realize this, we introduce a novel multimodal tokenizer designed to project data into a unified embedding space. Simultaneously, a large-scale pretrained foundation model serves as the shared feature extractor, encoding embeddings from diverse modalities. The learning process of the multimodal tokenizer is guided by cross-modal heads that are designed specifically for ReID tasks. The schematic of the AIO is illustrated in Fig. 2 .  \\n\\nPreliminary. Formally, within the AIO framework, three key components are denoted as follows: $I_{\\\\prime}$ )the multimodal tokenizer $\\\\psi^{m o d}(\\\\cdot),I I)$ the frozen multimodal encoder $f(\\\\cdot)$ , and III) cross-modal heads $\\\\Upsilon_{h e a d}(\\\\cdot)$ , where mod refers to the notation of each modality, such as RGB (R), IR (I), Sketch (S), and Text (T); head refers to the notation of Classification (CE), Vision Guided Masked Attribute Modeling (VA), and Feature Binding head (FB), respectively. The inputs are denoted as $x^{m o d}\\\\in X^{m o d}$ , the embeddings generated by tokenizers are $E^{m o d}$ , and the output feature from the frozen multimodal encoder is $z^{m o d}$ ,which is corresponding to class tokens in Fig. 2 . 
We assume $^{\\\\,l}$ )each modality possesses a specific parameter space $\\\\theta^{m o d}$ for modality-specific feature representations, and 2) there exists a shared parameter space $\\\\theta^{A}$ , an intersection of each modality parameter space for modality-shared feature representations, adhering to the condition:  \\n\\n$$\\n\\\\theta^{A}\\\\ \\\\in\\\\ \\\\theta^{R}\\\\cap\\\\theta^{I}\\\\cap\\\\theta^{S}\\\\cap\\\\theta^{T}\\\\ \\\\mathrm{and}\\\\;\\\\theta^{A}\\\\neq\\\\varnothing;\\n$$  \\n\\nFor a simple multimodal network with a cross-modal head $\\\\Upsilon$ can be written as:  \\n\\n$$\\n\\\\theta^{A}=\\\\underset{x\\\\in{\\\\cal{X}}^{m o d}}{\\\\arg\\\\operatorname*{min}}\\\\{\\\\Upsilon_{h e a d}\\\\circ z^{m o d}\\\\}\\n$$  \\n\\nwhere $z=f\\\\circ\\\\psi^{m o d}(x)$ ,$x\\\\in X^{m o d}$ is the input from any modality, tation of different modalities used in AIO is presented in ◦is the function composition operation. The noTab. 2 .\\n\\n# 3.1. Multimodal Tokenizer\\nTo project various modalities into a unified space, we devise a straightforward multimodal tokenizer. This tokenizer comprises four projectors: three dedicated to RGB, IR, and Sketch modalities, and one for Text. Furthermore, a multimodal embedding is constructed by amalgamating the embeddings from the respective modalities.  \\n\\nImage Tokenizers. Given the disparate channel counts in RGB, IR, and Sketch images, for the sake of convenience, we employ channel replication in both IR and Sketch modalities to align their channel count with the three channels present in RGB images. Deviating from the original tokenizer employed in ViT [ 10 ], leading to induce training instability [ 60 ], we opt for the IBN [ 44 ] style tokenizer from ViT-ICS [ 41 ]. The convolutional, batch normalization (BN), and rectified linear unit (ReLU) layers inherent to the IBN-style tokenizer substantially enhance training stability [5 ] and mitigate data bias which is critical for ReID [ 41 ].  \\n\\nTable 2. 
Notation of different modalities. Be aware that both Lineart [ 53 ] and CA [ 72 ] operations are introduced, and the generated images are considered as Sketch and IR images when these two modalities are missing.   \\n\\n\\n<html><body><table><tr><td>Source</td><td>Modality</td><td>Input</td><td>Embedding</td><td>Feature</td></tr><tr><td rowspan=\"4\">Raw</td><td>RGB (R)</td><td>XR</td><td>ER</td><td>ZR</td></tr><tr><td>IR (I)</td><td>X1</td><td>EI</td><td>ZI</td></tr><tr><td>Sketch (S)</td><td>XS</td><td>ES</td><td>ZS</td></tr><tr><td>Text (T) Multimodal (A)</td><td>XT</td><td>ET</td><td>ZT ZA</td></tr><tr><td rowspan=\"4\">Synthesis</td><td>CA [72]</td><td>XI</td><td>EA E</td><td>Z</td></tr><tr><td>Lineart [53]</td><td>XS</td><td>ES</td><td>ZS</td></tr><tr><td>Masked Text</td><td>XM</td><td>EM</td><td>ZM</td></tr><tr><td></td><td></td><td></td><td></td></tr></table></body></html>  \\n\\n  \\nFigure 3. The generated synthetic Sketch and IR images. We also visualize the feature distribution of RGB, IR, Sketch, and synthesized images.  \\n\\nText Tokenizers. In accordance with prior research efforts [23 ], we adopt the CLIP tokenizer [ 48 ] to directly map the text. Each word is uniquely associated with a token, and through the utilization of word embedding layers, it is projected into a high-dimensional feature space to yield a sequence of word embeddings.  \\n\\nMultimodal Embedding. In the context of multimodal embedding, the embeddings originating from various modalities are concatenated. Additionally, following previous works [ 8 ,10 ], a learnable token $z^{A}$ is appended to the sequence of multimodal embeddings. Simultaneously, position embeddings $E^{P o s}$ are employed to enhance position information, seamlessly integrated with the multimodal embeddings via element-wise addition, a procedure akin to the original operation in ViT [ 10 ]. 
The multimodal embedding is formulated as follows:  \\n\\n$$\\n\\\\begin{array}{r}{E^{A}=[z^{A},E^{R},E^{I},E^{S},E^{T}]+E^{P o s},}\\\\\\\\ {E\\\\in\\\\mathbf{R}^{n\\\\times D},E^{P o s}\\\\in\\\\mathbf{R}^{(n+1)\\\\times D}.}\\\\end{array}\\n$$\\n\\n# 3.2. Missing Modality Synthesis\\nGiven the insufficiency of multimodal data in ReID, especially in IR, and Sketch, we introduce Channel Augmentation (CA) [ 72 ] and Lineart [ 53 ] as augmentation methods to synthesize absent modalities. The generated samples are shown in Fig. 3 (a)-(c). The incorporation of synthetic modalities offers two advantages: 1) an expansion in the size of the multimodal sample, thereby mitigating issues associated with missing modalities; 2) CA and Lineart act as conduits bridging the gap between synthetic and real IR and sketch modalities. This is attributed to the feature distribution of the augmented images aligning between RGB and real IR and Sketch images. The visual representation of the feature distribution for RGB-Lineart-Sketch and RGB-CAIR, as presented in Fig. 3 (d) through t-SNE, serves as evidence of their efficacy in alleviating the learning challenges arising from modality gaps.  \\n\\nProgressively Learning with Synthetic data. We employ a progressively learning strategy to train the proposed AIO framework. The strategy involves initially training on synthetic images, incorporating real-world RGB and Text, for a few number of epochs. Subsequently, the model undergoes further fine-tuning using paired IR and Sketch images from the real world. This sequencing is deliberate, as synthetic images exhibit a reduced domain gap with RGB compared to real IR and Sketch images, facilitating a more accessible learning process for the model. A similar phenomenon is also found in other cross-modal works [ 19 ,39 ,70 ,76 ,83 ].',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454849661506361036,\n",
       "   'paper_id': '65fc055f13fb2c6cf6df27ae',\n",
       "   'paper_title': 'All in One Framework for Multimodal Re-identification in the Wild',\n",
       "   'chunk_id': 4,\n",
       "   'chunk_text': '# 3.3. Multimodal Modeling and Binding\\nAll representations extracted by the frozen multimodal encoder from each embedding are fed into cross-modal heads $\\\\Upsilon_{h e a d}$ , which are specifically designed to learn cross-modal relationships between different modalities. As illustrated in Fig. 2 , there are three heads: 1) Conventional Classification Head , learning identity invariant representations like in other ReID works [ 26 ,35 ,87 ]; 2) Vision Guided Masked Attribute Modeling , seeking to learn fine-grained RGB-Text relationships; 3) Multimodal Feature Binding , aiming to align each modality representations together.  \\n\\nConventional Classification (CE). The classification head only contains a bottleneck [ 40 ] and a classifier, which is constrained by Cross-Entropy loss as follows:  \\n\\n<html><body><table><tr><td>Partition</td><td>Dataset</td><td>Venue</td><td>#ID</td><td>#RGB Imgs</td><td>#IR Imgs</td><td>#Sketch Imgs</td><td>#Text</td></tr><tr><td rowspan=\"3\">Train</td><td>SYNTH-PEDES[88]</td><td>arXiv23</td><td>312,321</td><td>4,791,771</td><td>-</td><td></td><td>12,138,157</td></tr><tr><td>LLCM [78]</td><td>CVPR23</td><td>1,064</td><td>25,626</td><td>21,141</td><td></td><td></td></tr><tr><td>MaSk1K [37]</td><td>ACMMM23</td><td>996</td><td>32,668</td><td></td><td>4,763</td><td></td></tr><tr><td rowspan=\"5\">Test</td><td>Market1501 [82]</td><td>ICCV15</td><td>1,501</td><td>32,668</td><td></td><td></td><td>1</td></tr><tr><td>SYSU-MM01 [59]</td><td>IJCV20</td><td>491</td><td>30,071</td><td>15,792</td><td>-</td><td></td></tr><tr><td>PKU-Sketch[46][75]</td><td>ACMMM18</td><td>200</td><td>400</td><td>1</td><td>200</td><td>200</td></tr><tr><td>CUHK-PEDES [30]</td><td>CVPR17</td><td>13,003</td><td>40,206</td><td></td><td>-</td><td>80,412</td></tr><tr><td>Tri-CUHK-PEDES [7]</td><td>CVPR23</td><td>13,003</td><td>40,206</td><td>-</td><td>40,206</td><td>80,412</td></tr></table></body></html>\\n\\nTable 3. 
The statistics of datasets used in experiments. More details can be found in corresponding papers.  \\n\\n$$\\n\\\\mathcal{L}_{C E}=-\\\\frac{1}{N}\\\\sum^{N}y\\\\log(\\\\Upsilon_{C E}\\\\circ z^{m o d}),\\n$$  \\n\\nwhere $N$ is the number of pedestrian IDs, $\\\\Upsilon_{C E}$ indicates the Conventional Classification head. The conventional Triplet Loss, commonly employed in related frameworks, is omitted in our architecture, as we opt for the utilization of a multimodal feature binding loss.  \\n\\nVision Guided Masked Attribute Modeling (VA). Attributes play a pivotal role in highlighting essential characteristics of an individual, encompassing factors such as gender and hair color. These attributes are instrumental in cross-modal alignment and the differentiation of distinct individuals. In this context, we investigate the utility of attribute information embedded in the Text modality to serve as supervisory signals for learning discriminative person representations. To be specific, in the case of a paired RGB image and Text, we adopt a strategy from the prior work [66 ], where specific attribute keywords in the Text are selectively masked. These masked words are then projected to a special token $[M A S K]$ . Subsequently, the concatenated features of the paired RGB image and the masked token are fed into a decoder structured with MLPs and a classifier, represented as follows:  \\n\\n$$\\n\\\\mathcal{L}_{V A}=-\\\\frac{1}{N_{A}M}\\\\sum\\\\sum^{N_{A}}\\\\sum^{M}y\\\\log(\\\\Upsilon_{V A}(z^{R}\\\\oplus z^{m})),\\n$$  \\n\\nwhere $\\\\Upsilon_{V A}$ indicates the Vision Guided Masked Attribute Modeling head, $z^{m}\\\\;\\\\in\\\\;z^{M}$ are the features of masked tokens, $N_{A},M$ are the number of classes and the number of masked tokens, and $\\\\bigoplus$ denotes the concatenation operation. Multimodal Feature Binding (FB). To align all modalities onto a shared manifold, we attract features from all modalities towards the RGB feature. 
This alignment is facilitated through the incorporation of a novel supervised feature binding loss, elucidated in the subsequent section:  \\n\\n$$\\n\\\\mathcal{L}_{F B}=-\\\\sum\\\\log\\\\frac{\\\\exp(\\\\frac{1}{m o d}\\\\sum_{m o d\\\\neq R}||z_{i}^{R},z_{i}^{m o d}||/\\\\tau)}{\\\\sum_{i\\\\neq j}\\\\exp(||z_{i}^{R},z_{j}^{R}||/\\\\tau)},\\n$$  \\n\\nwhere $||\\\\cdot||$ is cosine similarity, $z_{i}^{R}$ is the representation of person i $i^{t h}$ RGB embedding, $z_{i}^{m o d}$ are the representations of person $i^{t h}$ other modalities embeddings, $z_{j}^{R}$ are the RGB representations belonging to other people, $\\\\tau$ is the temperature that controls the smoothness of the softmax distribution. Diverging from the conventional InfoNCE approach [ 52 ], our feature binding loss involves bringing together features from all modalities corresponding to the same individual, while simultaneously creating a separation between RGB features of distinct individuals, rather than applying the same principle to all features. This difference is motivated by the prevalence of RGB as the most common modality in real-world scenarios, contributing the most abundant data and consistently present in all publicly available datasets.\\n\\n# 3.4. Overall Architecture\\nAs elucidated earlier, the primary objective of the AIO framework is to learn a multimodal tokenizer through a frozen multimodal encoder, under the guidance of crossmodal heads. We believe that the emergent capabilities demonstrated in large-scale foundation models can effectively augment the zero-shot ability in multimodal ReID tasks. Additionally, capitalizing on the inherent adaptability of transformer architecture to accommodate variable input lengths, AIO exhibits competence in processing diverse combinations of commonly employed modalities in ReID. 
To realize this objective, AIO is constrained by three crossmodal heads, that can be written as follows:  \\n\\n$$\\n\\\\mathcal{L}=\\\\mathcal{L}_{C E}+\\\\alpha\\\\mathcal{L}_{V A}+\\\\mathcal{L}_{F B},\\n$$  \\n\\nwhere $\\\\alpha$ is a fixed weight to control the importance of Vision Guided Masked Attribute Modeling.\\n\\n# 4. Experiment\\nIn this section, we conduct a comprehensive evaluation of the proposed AIO framework across both cross-modal and multimodal ReID tasks. Our analysis demonstrates the efficacy of the AIO framework, particularly in zero-shot scenarios involving uncertain input modalities within ReID tasks. Additionally, we delve into the examination of varying foundation models and input modality combinations.  \\n\\n<html><body><table><tr><td>Heads</td><td>R→→R</td><td>I→R</td><td>S→→R</td><td>T→R</td><td>S+T→→R</td></tr><tr><td>Base</td><td>3.2</td><td>1.5</td><td>1.6</td><td>21.6</td><td>0.5</td></tr><tr><td>+ VA</td><td>6.4</td><td>0.9</td><td>1.2</td><td>52.4</td><td>0.6</td></tr><tr><td>+CE</td><td>78.0</td><td>43.7</td><td>46.8</td><td>53.5</td><td>91.8</td></tr><tr><td>+FB</td><td>79.6</td><td>57.6</td><td>70.2</td><td>53.4</td><td>92.1</td></tr></table></body></html>\\n\\nTable 4. Effectiveness of each cross-modal heads. The Rank-1 zero-shot performance is reported. The short notation of each modality will be used in this section , details can be found in Tab. 2 .\\n\\n# 4.1. Experimental settings\\nDatasets. Three publicly available datasets SYNTHPEDES [ 88 ] for R-T pairs, LLCM [ 78 ] for R-I images, MaSk1K [ 37 ] for R-S images are leveraged for training. For zero-shot performance evaluation, five widely used realworld datasets are used for evaluations, Market1501 [ 82 ] for $\\\\mathbf{R}{\\\\rightarrow}\\\\mathbf{R}$ ta YSU-MM01 [ 59 ] for $\\\\scriptstyle\\\\mathrm{I}\\\\to\\\\mathrm{R}$ tas U-Sketch Tri-CUHK-PEDES [ tics are shown in Tab. [46 ] for S ${\\\\bf S}{\\\\rightarrow}{\\\\bf R}$ →R task, CUHK 7 ] for T+S 3 . 
More details can be found in the $\\\\scriptstyle\\\\mathrm{T}+\\\\mathbf{S}\\\\rightarrow\\\\mathbf{R}$ →R task. The dataset statis[30 ] for T $\\\\mathrm{T}{\\\\rightarrow}\\\\mathrm{R}$ →R task, and original papers.  \\n\\nEvaluation Protocols. Following existing cross-modality ReID settings [ 6 ,31 ,36 ,61 ], we use the Rank$k$ matching accuracy, mean Average Precision (mAP) metrics, and mean Inverse Negative Precision (mINP) [ 69 ] for performance assessment. In the context of multimodal ReID, we adhere to the evaluation settings outlined in TriReID [75 ] and UNIReID [ 7 ] specifically designed for RGBText+Sketch scenarios. To accommodate other multimodal data combinations, we leverage CA [ 72 ] and Lineart [ 53 ]to generate simulated IR and Sketch images. While acknowledging that this may not perfectly simulate real-world scenarios, it provides valuable insights into the multimodal performance of the proposed AIO framework.  \\n\\nImplementation Details. We employ the ViT [ 10 ] as the backbone, which is pre-trained on LAION-2B dataset with contrastive learning, to reinforce the ability for generic token encoding. All parameters of the backbone networks are frozen. The Text tokenizer is from the pre-trained CLIP [ 48 ]to segment sentences into subwords and transform them into word embeddings. We perform a progressively learning strategy training process in AIO framework, as we discussed in Sec. 3.2 .stage 1) In the first 40 epochs, we sample 32 paired RGB and text samples from SYNTH-PEDES only combined with generated synthetic IR and Sketch images using CA [ 72 ] and Lineart [ 53 ]. Moreover, we randomly chose two to four embeddings from different modalities to build the multimodal embedding. It is worth noting that, multimodal embedding may not contain RGB embedding. stage 2) In the rest 80 epochs, we still select 32 samples for a batch but from all training datasets. 
For data from SYNTHPEDES, the sampling, synthetic methods, and construction of multimodal embedding are unchanged. For data from LLCM and MaSk1K, only paired RGB-IR and RGB-Sketch images are leveraged. The multimodal embedding for samples from these two datasets only contains available modalities. We also apply random horizontal flipping and random cropping for visual modalities. All images are resized to $384\\\\times192$ . The framework is optimized by AdamW [ 38 ]optimizer with a base learning rate of 1e-4, a cosine weight decay of $1e-4$ , and a warmup in the first 5 epochs. The learning rate of the CLIP tokenizer is multiplied by 1e-1 since they have already been pre-trained. The $\\\\alpha$ is set to 3e-1 and the $\\\\tau$ is set to 5e-2 as in [ 16 ]. The framework is distributively trained on 8 NVIDIA 3090 GPUs.  \\n\\n<html><body><table><tr><td>Model</td><td>R-R</td><td>I-→R</td><td>S-R</td><td>T→R</td><td>S+T→R</td></tr><tr><td>ViT*[41]</td><td>74.2</td><td>49.6</td><td>63.4</td><td>48.2</td><td>79.2</td></tr><tr><td>Uni* [28]</td><td>76.3</td><td>51.3</td><td>68.1</td><td>52.6</td><td>84.3</td></tr><tr><td>CLIP* [48]</td><td>77.7</td><td>55.5</td><td>69.5</td><td>52.8</td><td>87.6</td></tr><tr><td>LAION [80] t</td><td>79.6</td><td>57.6</td><td>70.2</td><td>53.4</td><td>92.1</td></tr></table></body></html>  \\n\\nTable 5. Zero-shot performance of AIO with different foundation models. The Rank-1 performance is reported. \\\\* indicates the tokenizer of the original model is replaced by ours. $^\\\\dagger$ is the backbone used in AIO.   
\\n\\n\\n<html><body><table><tr><td>Multimodal</td><td>Rank-1</td><td>Rank-5</td><td>Rank-10</td></tr><tr><td>R+T R+I</td><td>56.5 48.2</td><td>76.2 70.7</td><td>85.1 79.3</td></tr><tr><td>I+T I+S</td><td>53.4</td><td>74.3</td><td>81.0</td></tr><tr><td></td><td>48.1</td><td>70.6</td><td>79.0</td></tr><tr><td>R+I+T</td><td>57.8</td><td>78.0</td><td>86.3</td></tr><tr><td>I+S+T</td><td>55.6</td><td>74.1</td><td>82.2</td></tr><tr><td>R+I+S+T</td><td>58.6</td><td>77.9</td><td>86.6</td></tr></table></body></html>\\n\\nTable 6. Zero-shot performance with multimodal input on TriCUHK-PEDES. Be aware that the IR images are generated by using CA [ 72 ] rather than real-world IR images.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454849661575567054,\n",
       "   'paper_id': '65fc055f13fb2c6cf6df27ae',\n",
       "   'paper_title': 'All in One Framework for Multimodal Re-identification in the Wild',\n",
       "   'chunk_id': 5,\n",
       "   'chunk_text': '# 4.2. Ablation Study\\nEfficacy of Designed Modules. We first evaluate the effectiveness of the designed components. As evident from Tab. 4 , each introduced cross-modal head proves crucial for the overall performance of our All-in-One (AIO) framework. Specifically, VA head yields the most substantial performance enhanceme he $\\\\mathbf{T}{\\\\rightarrow}\\\\mathbf{R}$ task, CE head plays an important role in the R cross-modal and multimodal tasks. $\\\\mathbf{R}{\\\\rightarrow}\\\\mathbf{R}$ →R task, and FB head improves all Different Foundation Models. We explore various foundation models, including Uni-Perceiver v2 [ 28 ] (Uni), a Vision Transformer (ViT) pre-trained on LuPerson [ 41 ], and the pre-trained image encoder from CLIP [ 48 ]. The performance of these diverse foundation models is presented in Tab. 5 . As discernible from the table, the performance demonstrates an upward trend with the expansion of the pretraining dataset. Notably, despite LuPerson’s exclusive focus on ReID tasks, its performance lags behind other models due to its comparatively smaller size. This discrepancy underscores the pronounced zero-shot performance benefits associated with large-scale pre-trained foundation models. Influence of Multimodality Input. Because our proposed AIO framework supports any combination of diverse modalities as input, we also analyze the influence of different combinations of multimodal inputs on Tri-CUHKPEDES [ 7 ], where the missing IR modality is generated by CA [ 72 ]. As presented in Tab. 6 , our analysis indicates a preference for RGB and text modalities within our framework over other modalities. Furthermore, when the number of input modalities reaches or exceeds three, there is no significant alteration in performance. This outcome aligns with expectations, as RGB and Text modalities inherently provide more discriminative details than others.  
\\n\\n<html><body><table><tr><td rowspan=\"2\">Type</td><td rowspan=\"2\">Method</td><td rowspan=\"2\">Venue</td><td colspan=\"2\">RR</td><td colspan=\"2\">IR</td><td colspan=\"2\">S→→R</td><td colspan=\"2\">T→R</td></tr><tr><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td></tr><tr><td rowspan=\"3\">Pre-train</td><td>LuPerson-NL [12]</td><td>CVPR22</td><td>24.6*</td><td>11.6*</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>PLIP [88]</td><td>arXiv23</td><td>80.4</td><td>59.7</td><td></td><td></td><td></td><td></td><td>57.7</td><td>-</td></tr><tr><td>APTM [66]</td><td>ACMMM23</td><td>5.3*</td><td>3.5*</td><td>-</td><td>-</td><td>-</td><td>-</td><td>9.6*</td><td>2.7*</td></tr><tr><td rowspan=\"3\">Unimodal</td><td>OSNet-IBN [84]</td><td>ICCV19</td><td>73.0</td><td>44.9</td><td>-</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>M?L [81]</td><td>CVPR21</td><td>78.3</td><td>52.5</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>OSNet-AIN [85]</td><td>TPAMI21</td><td>73.3</td><td>45.8</td><td>-</td><td></td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td rowspan=\"3\">Cross-modal</td><td>AGW [69]</td><td>TPAMI21</td><td>17.3*</td><td>6.9*</td><td>18.2*</td><td>19.1*</td><td></td><td></td><td></td><td></td></tr><tr><td>IRRA [23]</td><td>CVPR23</td><td>66.6*</td><td>40.5*</td><td></td><td></td><td></td><td>-</td><td>30.1*</td><td>25.3*</td></tr><tr><td>UNIReID [7]</td><td>CVPR23</td><td>19.0*</td><td>8.2*</td><td></td><td></td><td>69.8</td><td>73.0</td><td>11.6*</td><td>9.7*</td></tr><tr><td>Multimodal</td><td>AIO (Ours)</td><td>-</td><td>79.6</td><td>59.9</td><td>57.6</td><td>51.9</td><td>70.2</td><td>73.5</td><td>53.4</td><td>43.4</td></tr></table></body></html>\\n\\nTable 7. Zero-shot performance on cross-modal retrieval. The best Rank-1 and mAP performance are reported. Results with \\\\* indicate that the experiment results are produced by authors. 
GW [ 69 ], it is trained on MSMT17 [ 58 ] and LLCM [ 78 ] f $\\\\scriptstyle\\\\mathbf{R}\\\\to\\\\mathbf{R}$ a $\\\\mathrm{I}{\\\\rightarrow}\\\\mathrm{R}$ .For IRRA [ 23 ], it is trained on ICFG-PEDES [ 9 ] for T $\\\\mathrm{T}{\\\\rightarrow}\\\\mathrm{R}$ →R. For UNIReID [ 7 ], it is trained on Tri-ICFG-PEDES [ 7 ] for T $\\\\mathrm{T}{\\\\rightarrow}\\\\mathrm{R}$ →R and R $\\\\scriptstyle\\\\mathrm{R}\\\\to\\\\mathrm{R}$ →R.  \\n\\n<html><body><table><tr><td>Method</td><td>Rank-1</td><td>mAP</td><td>mINP</td></tr><tr><td>UNIReID (T→→R)</td><td>76.8</td><td>80.6</td><td>77.8</td></tr><tr><td>UNIReID (S-→R)</td><td>69.8</td><td>73.0</td><td>68.3</td></tr><tr><td>AIO (T→R)</td><td>78.2</td><td>81.7</td><td>78.4</td></tr><tr><td>AIO (S→→R)</td><td>69.8</td><td>72.8</td><td>68.8</td></tr><tr><td>UNIReID (S+T→R)</td><td>91.4</td><td>91.8</td><td>89.0</td></tr><tr><td>AIO (S+T→R)</td><td>92.1</td><td>92.2</td><td>89.2</td></tr><tr><td>AIO (R+S+T→→R)</td><td>93.6</td><td>93.7</td><td>90.0</td></tr><tr><td>AIO (R+I+S+T→R)</td><td>93.8</td><td>93.7</td><td>90.3</td></tr></table></body></html>\\n\\nTable 8. Zero-shot performance with multimodal input and generalized cross-modal on PKU-Sketch.\\n\\n# 4.3. Evaluation on Multimodal ReID\\nGiven the rarity of generalizable works across cross-modal, multimodal, and pre-trained ReID, we conduct a comprehensive comparative analysis involving the proposed AIO framework, various large-scale pre-trained ReID models, unimodal generalized methods, cross-modal methods, and multimodal methods, all within the zero-shot setting. As illustrated in Tab. 7 , the existing large-scale pre-trained ReID models, with the exception of PLIP, exhibit unsatisfactory performance in the zero-shot setting. 
Moreover, AIO achieves competitive performance compared to unimodal generalization methods on $\\\\scriptstyle\\\\mathbf{R}\\\\to\\\\mathbf{R}$ retrieval task and outperforms cross-modal methods on all cross-modal retrieval tasks in the zero-shot setting. Notably, existing methods fall short in generalizing to unseen modalities, a limitation overcome by AIO, which adeptly handles all four modalities in cross-modal tasks. The outcomes presented in Tab. 8 unveil the remarkable performance of the proposed AIO framework when incorporating multimodal input. This superior performance stands in stark contrast to methods relying solely on unimodal inputs in cross-modal tasks. Additionally, the results consistently underscore the impact of different modalities, aligning with the conclusions drawn from our preceding ablation studies that AIO is more in favor of Text and RGB modalities than others. Moreover, we also discuss the difference between AIO and UNIReID in detail and the limitation of AIO in the supplemental part.\\n\\n# 5. Conclusion\\nTo the best of our knowledge, this is the first work delving into the uncertain multimodal ReID tasks encompassing all four prevalent modalities, e.g . RGB, IR, Sketch, and Text. We investigate the feasibility of harnessing largescale foundation models for multimodal ReID tasks, presenting a prospective avenue toward zero-shot multimodal ReID in wild conditions. In order to cooperate with foundation models, we introduce an innovative multimodal tokenizer, designed to utilize disparate modality inputs within a shared embedding space, guided by carefully crafted crossmodal heads. Moreover, we introduce synthetic augmentation methods with a progressively learning strategy to alleviate the missing modality problem and mitigate the crossmodal gap between different modalities. 
Extensive experimentation demonstrates the efficacy and competitive performance of the proposed AIO framework across both zeroshot cross-modal and multimodal ReID tasks.  \\n\\nAcknowledgement. This work is partially supported by National Natural Science Foundation of China under Grant (62176188, 62361166629, 62225113, 62306215), and the Special Fund of Hubei Luojia Laboratory (220100015).\\n\\n\\n\\n# All in One Framework for Multimodal Re-identification in the Wild\\nSupplementary Material\\n\\n# Differences between UNIReID and AIO\\nThere are three distinctions between UNIReID and AIO:  \\n\\n1) Divergent Goals: UNIReID and AIO fundamentally differ in their objectives. UNIReID aims to construct a multimodal model for intra-domain retrieval with the descriptive query. At the same time, AIO is explicitly crafted for universal retrieval in real-world scenarios, with four arbitrary modalities or their combinations. Notably, all experiments in this paper follow a zero-shot generalizable setting, which is inapplicable for UNIReID.  \\n\\n2) Different Challenges: UNIReID demands paired multimodal data. In comparison, AIO confronts even more challenging scenarios, involving unpaired heterogeneous multimodal data, with imbalanced and missing modalities. Thus, we introduce synthesized modalities and build connections among imbalanced modalities.  \\n\\n3) Disparate Approach: UNIReID incorporates multiple tasks to accommodate uncertain multimodal input. The number of optimization objectives of UNIReID grows exponentially with the number of modalities, making it hard to extend to more modalities and hindering its scalability. Conversely, AIO designs a flexible solution, treating uncertain multimodal input as variable input lengths. It leverages the adaptable nature of the transformer architecture, simplifying the integration of additional modalities. 
Furthermore, UNIReID employs separate encoders for various modalities, resulting in a lack of synergy between distinctive modalities. Different from UNIReID, AIO leverages a shared foundation model as the backbone to collaboratively learn comprehensive knowledge from heterogeneous multimodal data to complement each other and enhance its generalizablity in real-world scenarios.  \\n\\nAll these differences make AIO more robust and generalizable than UNIReID in real scenarios.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db',\n",
       "   'year': 2024},\n",
       "  {'id': 454849661638743760,\n",
       "   'paper_id': '65fc055f13fb2c6cf6df27ae',\n",
       "   'paper_title': 'All in One Framework for Multimodal Re-identification in the Wild',\n",
       "   'chunk_id': 6,\n",
       "   'chunk_text': '# Limitation\\n1) The computational complexity of AIO, necessitating ${\\\\mathcal{O}}(n^{2}\\\\,\\\\times\\\\,D)$ ns for processing token embeddings $E^{\\\\dot{A}},E^{R},E^{I},E^{S},E^{T}$ , particularly in the context of multimodal input, imposes a substantial memory cost and computational burden. This complexity poses challenges in scalability for incorporating additional modalities and deployment on resource-constrained edge devices. We assess the inference speed across varying numbers of modalities. Tab. 9 shows that the computation complexity escalates exponentially with the increase in the number of modalities, as anticipated.  \\n\\n2) Furthermore, it is worth noting that the implementation of multimodal ReID on synthetic data may not perfectly align with real-world scenarios, but also brings valuable insights for future works.  \\n\\n<html><body><table><tr><td>Number of Modalities</td><td>Inference Speed (ms)</td></tr><tr><td>1</td><td>10.23</td></tr><tr><td>2</td><td>47.66</td></tr><tr><td>4</td><td>181.32</td></tr></table></body></html>\\n\\nTable 9. Computation complexity in the different number of input modalities. All results are calculated with 700 samples.  \\n\\n3) Moreover, the learnable parameters within the tokenizer are constrained compared to approaches that fine-tune the entire backbone, presenting a double-edged sword. While AIO is lightweight and user-friendly, it may not capture as much detailed knowledge as some alternatives. To address this challenge, a promising way is to selectively unfreeze a subset of deep layers within the backbone model, a direction we plan to investigate in future work.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db',\n",
       "   'year': 2024}],\n",
       " [{'id': 454946119675807240,\n",
       "   'paper_id': '627b29bb5aee126c0f0fe830',\n",
       "   'paper_title': 'MNet: Rethinking 2D/3D Networks for Anisotropic Medical Image Segmentation.',\n",
       "   'chunk_id': 0,\n",
       "   'chunk_text': '# MNet: Rethinking 2D/3D Networks for Anisotropic Medical Image Segmentation\\nZhangfu $\\\\mathbf{Dong^{1}}$ ,Yuting $\\\\mathbf{H}\\\\mathbf{e}^{1}$ ,Xiaoming $\\\\mathbf{Q}\\\\mathbf{i}^{1}$ ,Yang Chen 1 ,,,Huazhong $\\\\mathbf{Shu^{1,2,3}}$ ,Jean-Louis Coatrieux 2 ,,Guanyu $\\\\mathbf{Yang^{1,2,3,*}}$ and Shuo $\\\\mathbf{Li^{4}}$   \\n1 LIST, Key Laboratory of Computer Network and Information Integration (Southeast University), Ministry of Education, Nanjing, China   \\n2 Jiangsu Provincial Joint International Research Laboratory of Medical Information Processing 3 Centre de Recherche en Information Biom´edicale Sino-Franc¸ais (CRIBs) 4 Dept. of Medical Biophysics, University of Western Ontario, London, ON, Canada yang.list $\\\\@$ seu.edu.cn\\n\\n# Abstract\\nThe nature of thick-slice scanning causes severe inter-slice discontinuities of 3D medical images, and the vanilla 2D/3D convolutional neural networks (CNNs) fail to represent sparse inter-slice information and dense intra-slice information in a balanced way, leading to severe underfitting to inter-slice features (for vanilla 2D CNNs) and overfitting to noise from long-range slices (for vanilla 3D CNNs). In this work, a novel mesh network (MNet) is proposed to balance the spatial representation inter axes via learning. 1) Our MNet latently fuses plenty of representation processes by embedding multi-dimensional convolutions deeply into basic modules, making the selections of representation processes flexible, thus balancing representation for sparse inter-slice information and dense intra-slice information adaptively. 2) Our MNet latently fuses multi-dimensional features inside each basic module, simultaneously taking the advantages of 2D (high segmentation accuracy of the easily recognized regions in 2D view) and 3D (high smoothness of 3D organ contour) representations, thus obtaining more accurate modeling for target regions. 
Comprehensive experiments are performed on four public datasets (CT&MR), the results consistently demonstrate the proposed MNet outperforms the other methods. The code and datasets are available at: https://github.com/zfdong-code/MNet',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db',\n",
       "   'year': 2022},\n",
       "  {'id': 454946119689963018,\n",
       "   'paper_id': '627b29bb5aee126c0f0fe830',\n",
       "   'paper_title': 'MNet: Rethinking 2D/3D Networks for Anisotropic Medical Image Segmentation.',\n",
       "   'chunk_id': 1,\n",
       "   'chunk_text': '# 1 Introduction\\nThe rise of deep learning greatly drives the advance in 3D medical image segmentation [Lei et al. , 2020], however, it is still extremely challenging in the 3D images with anisotropic voxel spacing [Tajbakhsh et al. , 2020]. The nature of thickslice scanning [Goldman, 2007] (e.g. CT, MR) causes severe inter-slice discontinuities of 3D medical images, and the vanilla 2D/3D networks [Zhou et al. , 2020; Huang et al. , 2017] fail to represent sparse inter-slice information and dense intra-slice information in a balanced way. Specifically, 1) underfitting to inter-slice correlations: 2D CNNs [Zhou et al. , 2020; Zotti et al. , 2019] concentrate on the representation of dense intra-slice information, but completely fail to capture inter-slice correlations, making severe underfitting to inter-slice features, resulting in relatively stable but weak performance (Figure 4 in experiments section). 2) Overfitting to inter-slice noise: 3D CNNs [Jiang et al. , 2021; Milletari et al. , 2016] simultaneously perform convolution on $x/y.$ - and $z$ -axis without taking the anisotropic nature into account. The highly discontinuous sparse information along $z$ -axis adds substantial long-range noise to the representation of local features in $x y$ -plane, making the network prone to over-fitting, resulting in poor generalization ability with the aggravation of anisotropic degree (Figure 4).  \\n\\nDue to the limitations of using vanilla 3D or 2D CNNs for the segmentation of anisotropic volumes, some alternatives have been proposed to take the advantages of 3D and 2D convolutions simultaneously: 1) 2.5D CNNs [Wang et al. ,2019]. 
Instead of directly feeding images with anisotropic spacing into 3D CNNs, 2.5D CNNs perform 2D convolution $(3\\\\!\\\\times\\\\!3\\\\!\\\\times\\\\!1)$ and pooling $(2\\\\!\\\\times\\\\!2\\\\!\\\\times\\\\!1)$ ntil the spacing of $x/y$ -axis is increased to a similar level of z-axis before performing 3D convolution, thus roughly achieving balanced representation in 3D field. However, once the spacing ratio of each axis changes, these networks have to be adjusted manually and retrain to adapt (Figure 1(a)). 2) Ensemble of 2D and 3D CNNs. 2D CNNs have isotropic receptive fields in $x y$ -plane but fail to represent inter-slice features, while 3D CNNs are the opposite. To combine the merits of 2D and 3D CNNs, many methods integrate 2D and 3D CNNs in a serial [Lee et al. , 2015; Xia et al. , 2018] or parallel manner [Zheng et al. , 2019; Isensee et al. , 2021]. However, imbalanced representation of sparse inter-slice information and dense intra-slice information still exists deeply inside these result-level serial or parallel combinations owing to their independent multidimensional representation processes, limiting the final segmentation accuracy.  \\n\\nIn this paper, we propose a novel MNet for anisotropic medical image segmentation, which represents sparse interslice information and dense intra-slice formation in a balanced way, avoiding the network prone to under/over-fitting to inter-slice information, thus achieving excellent segmentation performance and strong adaptability to the aggravation of anisotropic degree. Specifically:  \\n\\n  \\nFigure 1: The rethinking of 2D/3D Networks for anisotropic medical image segmentation. (a) Rethinking the 2D/2.5D/3D CNNs. 2D CNNs only represent features in $x y$ -plane with even receptive fields, making severe underfitting of inter-slice features. 
3D CNNs can capture interslice correlations but suffer from the overrepresentation for highly discontinuous long-range sparse information along $z$ -axis, which makes the network prone to over-fitting. 2.5D CNNs first use 2D convolutions and pooling for intra-slice feature extraction until the resolution becomes isotropic before performing 3D convolutions, suffering from poor adaptability. (b) We first make a shallow latent fusion of multiple 2D/2.5D/3D CNNs to achieve a multi-path representation process, adapting to the variation of spacing ratios. (c) We further make a deep latent fusion of multi-dimensional convolutions, extending the multi-path network to a mesh architecture, achieving free selections of representation processes via learning, thus realizing balanced representation for anisotropic information.  \\n\\nInnovation 1 : Latent fusion of representation processes. MNet adaptively balances the representation inter axes in the learning process, instead of determining the manner about how to adjust spacing ratio before training. 1) We first propose the shallow latent fusion which simultaneously fuses multiple 2D/2.5D/3D CNNs (Figure 1(a)) to achieve a multipath representation process (Figure 1(b)), thus adapting to the variation of spacing ratios inter axes and achieving preliminarily balanced representation without manual adjustments. 2) We further make a deep latent fusion of multi-dimensional convolutions, extending the multi-path network to a mesh architecture (Figure 1(c)), achieving free selections of representation processes via learning, thus realizing balanced representation for sparse inter-slice information and dense intra-slice information. Specifically, at each latent block, if the sparse inter-slice information is over-represented, there is a 2D convolution follows, which doesn’t further aggregate inter-slice features. Also, if correlations along $z$ -axis are required, the 3D convolution exists as well.  
\\n\\nInnovation 2 : Latent fusion of multi-dimensional features. MNet contains plenty of latent representation processes (LRP), and the multi-dimensional features from different LRP are fed into basic modules contained in MNet, where the feature-level fusion of anisotropic information is performed for balanced and accurate feature representations. As shown in Figure 2(c), multi-dimensional features are fused to simultaneously take the advantages of 2D and 3D representations that 1) the isotropic receptive field of 2D CNNs leads to the high segmentation accuracy of the easily recognized regions in 2D view, and 2) 3D CNNs pay more attention to the smoothness of 3D organ contour and some structures that are not obvious in 2D view. Therefore, compared with the result-level fusion [Zheng et al. , 2019; Isensee et al. , 2021], our feature-level fusion makes better representations from shallow layers to deep layers, obtaining more accurate modeling for target regions.  \\n\\nOur contributions are summarized as follows:  \\n\\n-We propose a novel CNN architecture, MNet, for anisotropic medical image segmentation, which represents sparse inter-slice information and dense intra-slice formation in a balanced way, thus avoiding under- or overrepresentation to long-range inter-slice features.  \\n\\n-We propose the latent fusion of plenty of representation processes by embedding multi-dimensional convolutions deeply into basic modules contained in MNet, achieving free selections of representation processes via learning, thus balancing representation for sparse inter-slice information and dense intra-slice information adaptively.  
\\n\\n-We propose the latent fusion of multi-dimensional features which fuses features from multiple LRP inside each basic module, simultaneously taking the advantages of 2D (high segmentation accuracy of the easily recognized regions in 2D view) and 3D (high smoothness of 3D organ contour) representations, thus obtaining more accurate modeling for target regions.  \\n\\n-We demonstrate the excellent performance of MNet for the segmentation of 3D medical images with anisotropic resolutions by conducting extensive experiments on four widelyused public datasets (CT&MR).',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db',\n",
       "   'year': 2022},\n",
       "  {'id': 454946119703332364,\n",
       "   'paper_id': '627b29bb5aee126c0f0fe830',\n",
       "   'paper_title': 'MNet: Rethinking 2D/3D Networks for Anisotropic Medical Image Segmentation.',\n",
       "   'chunk_id': 2,\n",
       "   'chunk_text': '# 2 Related Works\\n\\n# 2.1 Standalone 2D/3D CNNs\\nThe representative of the slice-by-slice segmentation methods is 2D U-Net [Ronneberger et al. , 2015]. The skip connection adopted by U-Net enables the fusing of the fine-grained feature maps from the shallow layer and the coarse-grained but semantic feature maps in the deep layer. Furthermore, UNet $^{++}$ [Zhou et al. , 2020] adopts a build-in ensemble of UNets of varying depths for multi-scale objects and redesigns skip connections for more flexible feature fusion in the decoders. Due to the isotropic voxel spacing of 2D slices $(x y$ plane), the above 2D methods naturally have matched fields of view along $x$ - and $y$ -axis. However, without constraints from adjacent slices, the 3D organ contours formed by stacking 2D segmentation results are not smooth enough along the $z$ -axis.  \\n\\nThe segmentation object of 3D CNNs is the 3D volume instead of 2D slices [Milletari et al. , 2016], which enables the extraction of inter-slice information. In the work of [C¸ ic¸ek et al. , 2016], the 2D operation in U-Net is replaced with a 3D counterpart to obtain 3D U-Net, which realizes end-toend 3D medical image segmentation. To deepen 3D networks without increasing the parameters, ALA-Net [Jiang et al. , 2021] adopts the global context encoder (GCE) with the recurrent strategy. To efficiently aggregate long-range information, [Wang et al. , 2020] propose the non-local U-Nets embedded with the global aggregation block to aggregate longdistance dependencies in biomedical images. The segmentation results of 3D CNNs are smoother than 2D CNNs along the z-axis owing to the utilization of inter-slice information. However, 3D CNNs will make imbalanced feature representation if the input images are anisotropic.\\n\\n# 2.2 Combination of 2D&3D CNNs\\nAnisotropic spatial resolutions of medical images lead to the uneven physical receptive field in 3D convolutions. 
To make the receptive field of real world even, the 2.5D U-Net [Wang et al. , 2019] first uses the 2D convolution and pooling for intra-slice feature extraction until the spacing of the $x y$ plane is reduced to a similar level of inter-slice resolution before performing 3D convolution. However, once the resolution ratio of inter axes changes, 2.5D U-Net has to be adjusted manually and retrain to adapt.  \\n\\nFeatures of anisotropic images extracted by 2D and 3D CNNs have their own strengths and weaknesses. It is a natural idea to fuse multi-dimensional features for better segmentation performance, and there are already some models that benefit from the fusion[Lee et al. , 2015; Xia et al. , 2018]. In the study of nnU-Net [Isensee et al. , 2021], the final results are taken from the ensemble of separately trained 2D and 3D U-Nets for better generalization performance. Instead of training 2D and 3D models separately, H-DenseUNet [Li et al. , 2018] adopts a hybrid feature fusion layer to form better representations with the multi-dimensional features generated by 2D and 3D subnets. The above fusion of 2D and 3D features is mainly for the result-level features, but the independent 2D/3D CNNs still have uneven physical receptive fields inside their feature representation process.\\n\\n# 3 Method\\n\\n# 3.1 Mesh Architecture\\nAs shown in Figure 2(a), MNet is composed of $5{\\\\ast}5$ modules. Modules with purple background contain both the 2D and 3D convolution blocks, while modules with red or blue background have a single 2D or 3D block. Inside the 2D or 3D block are two $3\\\\!\\\\times\\\\!3\\\\!\\\\times\\\\!1$ or $3\\\\!\\\\times\\\\!3\\\\!\\\\times\\\\!3$ convolutional layers, both layers are followed by the instance normalization and the LeakyReLU activation function. Using instance normalization instead of batch normalization is mainly because the batch size is limited by GPU memory for 3D images.  
\\n\\nConnecting each module with its neighbors, the mesh structure of MNet can be naturally formed. Benefiting from the mesh structure, we have various subnets contained in MNet. The different combinations of 2D and 3D modules make the structure of each subnet different from each other, leading to the extracting of more discriminative features. Encoder-decoder structure is commonly applied in networks for semantic segmentation. Any serial subnet in MNet follows the encoder-decoder structure. Taking the 3D subnet composed of the 3D convolution blocks in the first column and the fifth row as an example, the maxpooling is used for reducing the spatial resolution and aggregating long-range information at the encoding stage of the 3D subnet, which is totally performed four times. Symmetrically, the decoder adopts linear interpolation to gradually recover the spatial resolution of feature maps. The number of filters $(K)$ of each convolution block at encoding stage is set to  \\n\\n$$\\nK=32+16\\\\times(D e p t h-1),\\n$$  \\n\\nwhere ${D e p t h\\\\in\\\\{1,2,3,4,5\\\\}}$ , and the filter number of decoder is symmetrical with encoder.\\n\\n# 3.2 Latent Multi-dimensional Feature Fusion\\nTo combine the merits of 2D and 3D representations, features extracted from different geometric perspectives should be fused. Modules at various depths of MNet are designed to have multiple inputs or/and outputs, so as to make the extraction and fusion of multi-dimensional features throughout the forwarding propagation. The details of the latent fusion in the decoding stage are shown in Figure 2(c). Let $X_{2d}$ and $X_{3d}$ denote the high-level 2D and 3D feature maps from latent representation process $L R P1$ and $L R P2$ .$X_{2d}$ and $X_{3d}$ are first fed into the upsampling layers with scale factors of $1\\\\!\\\\times\\\\!2\\\\!\\\\times\\\\!2$ $(U_{2d})$ and $2\\\\!\\\\times\\\\!2\\\\!\\\\times\\\\!2$ $(U_{3d})$ , respectively. 
After that, the multi-dimensional features are passed to the feature merging unit (FMU) which is set to element-wise subtraction followed by the operation of taking the absolute value $(a b s)$ according to experimental performance, the merged features $F_{m}$ can be defined as  \\n\\n$$\\nF_{m}=a b s(U_{2d}(X_{2d})-U_{3d}(X_{3d})),\\n$$  \\n\\n  \\nFigure 2: The architecture of our MNet. (a) The mesh structure makes the selections of representation processes unconstrained by embedding multi-dimensional convolutions deeply into latent basic modules. Supervision information is provided to six additional output branches to fully train shallow layers. (b) MNet simultaneously latently fuses plenty of representation processes to adaptively form a balanced representation process via learning for anisotropic information inter axes. (c) MNet latently fuses multi-dimensional and multi-level features inside basic modules, simultaneously taking the advantages of 2D and 3D representations, thus obtaining more accurate modeling for target regions.  \\n\\nIn the process of forwarding propagation, the foreground is gradually detected. However, with the feature level getting higher, the details of the target organ are gradually lost. Therefore, the low-level features with detailed information in the encoder are passed to the decoder to assist in the recovery of the organ contour. Let $X_{2d}^{e},X_{3d}^{e}$ represent the feature maps from $L R P3$ and $L R P4$ , which are passed to the decoding stage through skip-connection. Similarly, the merged features $F_{m}^{e}$ mcan be defined as  \\n\\n$$\\nF_{m}^{e}=a b s(X_{2d}^{e}-X_{3d}^{e}).\\n$$  \\n\\nSubsequently, the channel-wise concatenation is applied to $F_{m}^{e}$ and $F_{m}$ followed by 2D and 3D convolutions for the extraction of more balanced and accurate features.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db',\n",
       "   'year': 2022},\n",
       "  {'id': 454946119716963854,\n",
       "   'paper_id': '627b29bb5aee126c0f0fe830',\n",
       "   'paper_title': 'MNet: Rethinking 2D/3D Networks for Anisotropic Medical Image Segmentation.',\n",
       "   'chunk_id': 3,\n",
       "   'chunk_text': '# 3.3 Latent Representation Process Fusion\\nAs shown in Figure 2(b), the existing representation processes (e.g., 2D/2.5D/3D CNNs) are latently contained in our MNet, and plenty of novel representation processes can be derived from our MNet, making the selections of representation processes unconstrained, thus balancing representation adaptively. Taking the latent 2.5D CNN as an example, we explain the process about how to balance the representation of anisotropic information. Assuming that the spacing ratio $(z:x/y)$ of the input image is 1:4, the physical distance along $z$ -axis between two adjacent voxels is much longer than it along $x/y$ -axis in the real world, so the convolution and maxpooling are first performed only on $x y$ -plane two times, gradually increasing the intra-slice spacing, thus making the spacing ratio become 1:1. After that, the isotropic feature maps are fed into the 3D subnet for balanced spatial feature representation.\\n\\n# 3.4 Deep Optimization for Mesh Architecture\\nWe perform a deep optimization to ensure the trainable parameters at various depths of mesh architecture are fully optimized. Specially, we adopt the $1\\\\!\\\\times\\\\!1\\\\!\\\\times\\\\!1$ convolution to form six additional output branches. For the segmentation results of each branch, a resampled label is provided for the loss calculation [Pang et al. , 2021; Shen et al. , 2020]. The weighted sum of each loss item is taken as the final loss. The loss function we adopt is the hybrid of cross-entropy loss and dicecoefficient loss, defined as  \\n\\n$$\\nl(X,Y)=-(\\\\frac{2}{C}\\\\sum_{c}\\\\frac{\\\\sum_{i}x_{c i}y_{c i}}{\\\\sum_{i}x_{c i}+\\\\sum_{i}y_{c i}}+\\\\sum_{i}\\\\sum_{c}y_{i c}l o g x_{i c}),\\n$$  \\n\\nwhere $X$ is the prediction with softmax applied and $Y$ is the ground truth. $x/y$ is the voxel contained in $X/Y$ .$i$ denotes the index of els, while $c\\\\in C$ is the class of the current channel. 
Let $X_{i j}$ denotes the segmentation results of modules in raw $i$ and column $j,Y_{i j}$ denotes the corresponding ground truth, the final loss can be defined as  \\n\\n$$\\n{\\\\mathcal{L}}=l(X_{55},Y_{55})+\\\\sum_{i=2}^{4}\\\\lambda_{i}(l(X_{i5},Y_{i5})+l(X_{5i},Y_{5i})),\\n$$  \\n\\nwhere $\\\\begin{array}{r}{\\\\lambda=\\\\left(\\\\frac{1}{2}\\\\right)^{5-i}}\\\\end{array}$ is the weight of each loss item.  \\n\\nTable 1: Comparison of MNet and other methods on four widely-used datasets in terms of Dice $(\\\\%)$   \\n\\n\\n<html><body><table><tr><td rowspan=\"2\">Methods</td><td rowspan=\"2\">Takeanisotropy into account</td><td colspan=\"2\">LiTS</td><td colspan=\"2\">KiTS</td><td colspan=\"2\">BraTS</td><td colspan=\"2\">PROMISE</td></tr><tr><td>Liver</td><td>Tumor</td><td>Kidney</td><td>Tumor</td><td>ED</td><td>NCR&NET</td><td>ET</td><td>Prostate</td></tr><tr><td>3D U-Net</td><td>×</td><td>90.1</td><td>51.0</td><td>95.5</td><td>63.7</td><td>81.7</td><td>70.1</td><td>82.7</td><td>85.6</td></tr><tr><td>2D U-Net</td><td></td><td>91.2</td><td>58.1</td><td>95.8</td><td>70.1</td><td>82.6</td><td>71.1</td><td>83.5</td><td>88.1</td></tr><tr><td>2.5D U-Net</td><td></td><td>93.3</td><td>58.4</td><td>96.2</td><td>79.1</td><td>82.5</td><td>70.5</td><td>83.1</td><td>88.4</td></tr><tr><td>nnUNet</td><td></td><td>94.1</td><td>62.0</td><td>96.3</td><td>79.1</td><td>83.3</td><td>71.4</td><td>83.7</td><td>89.5</td></tr><tr><td>Our MNet</td><td></td><td>94.3</td><td>66.3</td><td>96.4</td><td>81.8</td><td>83.5</td><td>72.0</td><td>83.5</td><td>89.8</td></tr></table></body></html>\\n\\n# 4 Experiments\\n\\n# 4.1 Experiments Setup\\nDatasets: Four widely used public datasets, which involve multiple modalities, are selected for comprehensive evaluations. Two CT datasets: 1) The Liver and Liver Tumor Segmentation challenge 2017 (LiTS) contains 131 labeled training CT images, target regions are the liver and liver tumors. 
2) The Kidney and Kidney Tumor Segmentation challenge 2019 (KiTS) provides 210 labeled training CT images whose target regions are the kidneys and kidney tumors. Two MR datasets: 1) The Multimodal Brain Tumor Segmentation Challenge 2020 (BraTS) provides 369 labeled training cases. Each case has T1-weighted (T1), post-contrast T1- weighted (T1ce), T2-weighted (T2), and fluid attenuated inversion recovery (Flair) sequences. The target regions are the peritumoral edema (ED), necrotic&the non-enhancing tumor core (NCR&NET), and enhancing tumor (ET). 2) The T2 MR dataset of the PROMISE challenge 2012 contains 50 labeled training cases, the segmentation target is the prostate.  \\n\\nImplementation Details: The stochastic gradient descent (SGD) with a momentum of 0.99 is selected as the optimizer. The initial learning rate (0.01) is gradually reduced according to the ”poly” learning rate policy [Chen et al. , 2018], and the maximum epoch is set to 500. Following the default setting of nnU-Net, the batch sizes for LiTS, KiTS, BraTS, and PROMISE are 2, 2, 4, 2, while the patch sizes are set to $40\\\\!\\\\times\\\\!224\\\\!\\\\times\\\\!192$ ,$32\\\\!\\\\times\\\\!224\\\\!\\\\times\\\\!224$ ,$28\\\\!\\\\times\\\\!192\\\\ \\\\times\\\\!160$ ,$16\\\\!\\\\times\\\\!320\\\\!\\\\times\\\\!320$ respectively. Extensive data augmentation techniques are employed to improve the generalization ability ([Isensee et al. , 2021]). Deep supervision and the same model training&selection schema are applied to all the involved networks for fairness. The experiments are performed with a 32GB V100 GPU. Networks implemented with MindSpore 1 and PyTorch are available at: https://github.com/zfdongcode/MNet.  \\n\\nEvaluation Metric: To quantitatively evaluate the segmentation performance, the Dice similarity coefficient (Dice) is used as the metric in this work. 
Dice is the widely used geometrical metric [Taha and Hanbury, 2015], which measures the overlapping between the prediction and the manual label (higher is better).  \\n\\nComparison Settings: Extensive experiments are performed on four datasets to fully evaluate the performance of the proposed MNet. Each dataset is randomly split into the training set $(80\\\\%)$ and testing set $(20\\\\%)$ . After training for 500 epochs, the model of the final epoch is used for testing. The training processes for all the networks are automatically finished by the nnUNet framework for fairness. 1) Effectiveness for anisotropic medical images : We first unify the spacings along $z$ -axis of the four datasets to $5\\\\mathrm{mm}$ , which is a common spacing of thick-slice medical images, and their spacings along $x/y$ -axis are all below $1\\\\mathrm{mm}$ , then, we compare our MNet and other methods to verify the effectiveness of our innovations. 2) Adaptability to aggravation of anisotropic degree : After demonstrating the advanced nature for the segmentation of images with large inter-slice spacing (i.e., $5\\\\mathrm{mm}$ ), we further explore the adaptability to the interslice spacing changes of MNet and other methods. We unify the spacing along $z$ -axis of LiTS dataset five times, obtaining five datasets with inter-slice spacings from $1\\\\mathrm{mm}$ to $5\\\\mathrm{mm}$ to train all the networks. 3) Selection of feature merging unit :Finally, we compare the performance of MNet with different feature merging units on the four datasets.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db',\n",
       "   'year': 2022},\n",
       "  {'id': 454946119730071056,\n",
       "   'paper_id': '627b29bb5aee126c0f0fe830',\n",
       "   'paper_title': 'MNet: Rethinking 2D/3D Networks for Anisotropic Medical Image Segmentation.',\n",
       "   'chunk_id': 4,\n",
       "   'chunk_text': '# 4.2 Comparison with State-of-the-art Methods Effectiveness for Anisotropic Medical Images\\nThe proposed MNet consistently achieves the best performance on four widely-used datasets compared with other advanced methods. Three valuable conclusions can be drawn from Table 1: 1) The methods with anisotropy taken into consideration significantly outperform the others. The top segmentation results are rarely produced by generic approaches, which indicates the anisotropic nature does bring challenges to segmentation tasks and the necessity of our innovations. 2) The best results are almost always produced by our MNet. Among methods taking anisotropy into account, 2.5D UNet serially combines 2D and 3D convolutions, making the features fed into 3D convolutions are roughly isotropic, achieving higher performance than standalone 2D and 3D CNNs. The architecture of nnUNet can be dynamically adjusted according to the properties of the given dataset without any manual intervention, resulting in powerful generalization ability, thus outperforming 2.5 UNet. Instead of determining the manner about how to adjust spacing ratio before training, our MNet adaptively balances the representation inter axes in the learning process, owing to its flexible latent extraction and fusion of multi-dimensional representations. 3)  \\n\\n  \\nFigure 3: Visualization of segmentation results obtained by different methods for qualitative comparison, which have been cropped for clarity.   \\nTable 2: Comparison between different types of feature merging unit (FMU) on the four public datasets in terms of Dice $(\\\\%)$ . The reported results are the mean Dice of different target regions. Sum and Sub indicate the summation and absolute value of the subtraction, respectively.  \\n\\nMNet outperforms other methods by a large margin on fragile structures (e.g., tumors). 
The results of MNet on tumors of LiTS (66.3) and KiTS (81.8) are more than 10 percent higher than 3D UNet (51.0 and 63.7), demonstrating the discontinuity inter-slice on fragile structures is much more severe than it on large target regions (e.g., kidney), where especially need our balanced representation processes.  \\n\\nTo make a qualitative analysis of the segmentation performance, we visualize the segmentation results of the four public datasets. As shown in Figure 3, the results of MNet are closest to the ground truth (GT) compared with other methods.\\n\\n# Adaptability to Aggravation of Anisotropic Degree\\nThe proposed MNet has strong adaptability to the aggravation of anisotropic degree in medical images. The spacing along $z$ -axis can vary largely from one dataset to another, to explore the adaptability of different approaches to the variation, five datasets derived from the LiTS dataset are separately used to train the methods of comparison. Specifically, to obtain datasets with continuously varying inter-slice spacings, we resample the LiTS dataset five times with five target interslice spacings ( $1\\\\mathrm{mm}$ to $5\\\\mathrm{mm}$ ), while the intra-slice spacing keeps the same $(0.77\\\\mathrm{mm}\\\\!\\\\times\\\\!0.77\\\\mathrm{mm})$ ).  \\n\\n  \\nFigure 4: Adaptability to aggravation of anisotropic degree. Our MNet is robust to the aggravation of anisotropic degree, while the performance of other approaches drops drastically. Mean Dice of the liver and tumors are used as evaluation metric.  
\\n\\n<html><body><table><tr><td>Methods</td><td>LiTS</td><td>KiTS</td><td>BraTS</td><td>PROMISE</td><td>Mean</td></tr><tr><td>Sum</td><td>79.3</td><td>88.3</td><td>79.9</td><td>90.0</td><td>84.4</td></tr><tr><td>Sub</td><td>80.3</td><td>89.1</td><td>79.7</td><td>89.8</td><td>84.7</td></tr></table></body></html>  \\n\\nAs shown in Figure 4, 1) our MNet is robust to the aggravation of anisotropic degree, making the mean Dice around $80\\\\%$ , outperforming other approaches by a large margin $10\\\\%$ higher than 3D UNet when spacing is $5\\\\mathrm{mm}$ ). 2) The performance of 2D UNet is stable but never achieves a high level owing to the lack of information along $z$ -axis. 3) 3D UNet performs well when the spacing is roughly isotropic $(0.77{\\\\times}0.77{\\\\times}1m m^{3})$ ), however, with the inter-slice continuity getting lower, fails to balance the representations between $x/y-$ and $z$ -axis, leading to dramatic performance degradation. 4) Based on the automatic adjustments to the network architecture, nnUNet avoids large performance degradation, but its adaptability is weaker than our MNet since external adjustments made before training cannot well suit the anisotropic nature like our latent multi-dimensional representation balancing learned in the training process.\\n\\n# 4.3 Selection of Feature Merging Unit\\nThe fusion of features from multiple latent representation processes achieves balanced and accurate representation for anisotropic information. We perform experiments on four datasets to explore the relationship between segmentation performance and different feature merging approaches. Sum makes the merged features contain richer information, while Sub enhances the differences between multiple inputs. 
As shown in Table 2, two types of FMU consistently achieve high and similar performance across the four widely-used datasets, demonstrating what really counts is the behavior of latent fusion and extraction of multi-dimensional representations instead of the specific design of FMU.\\n\\n# 5 Conclusion\\nIn this paper, we propose MNet for anisotropic 3D medical image segmentation which can represent sparse interslice information and dense intra-slice information in a balanced way, thus avoiding under- or over-representation to inter-slice features. Instead of determining the manner about how to adjust spacing ratio before training, our MNet adaptively balances the representation inter axes in the learning process, owing to its free latent extraction and fusion of multidimensional representations. Extensive experiments are performed on four widely-used public datasets, results demonstrate the proposed MNet not only outperforms the methods of comparison but also has outstanding adaptability to the aggravation of anisotropic degree.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db',\n",
       "   'year': 2022},\n",
       "  {'id': 454946119741605394,\n",
       "   'paper_id': '627b29bb5aee126c0f0fe830',\n",
       "   'paper_title': 'MNet: Rethinking 2D/3D Networks for Anisotropic Medical Image Segmentation.',\n",
       "   'chunk_id': 5,\n",
       "   'chunk_text': '# Acknowledgments\\nThis work was supported in part by the National Key Research and Development Program of China (No. 2021ZD0113202), in part by the National Natural Science Foundation under grants (61828101), CAAI-Huawei MindSpore Open Fund, CANN(Compute Architecture for Neural Networks), Ascend AI Processor, and Big Data Computing Center of Southeast University.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db',\n",
       "   'year': 2022}],\n",
       " [{'id': 454847689415070418,\n",
       "   'paper_id': '6464b059d68f896efa35a978',\n",
       "   'paper_title': 'MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding',\n",
       "   'chunk_id': 0,\n",
       "   'chunk_text': '# MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding\\nJun Chen 1 ∗Ming $\\\\mathrm{Hu^{1*}}$ Darren J. Coker 1 Michael L. Berumen 1 Blair Costelloe 2 ,Sara Beery 4 Anna Rohrbach 5 Mohamed Elhoseiny 1 1 King Abdullah University of Science and Technology (KAUST) 2 Max Planck Institute of Animal Behavior , 3 University of Konstanz 4 Massachusetts Institute of Technology, 5 University of California, Berkeley  \\n\\n  \\nFigure 1. We propose MammalNet, a large-scale video benchmark for recognizing mammals and their behavior. It is built around a biological mammal taxonomy spanning 17 orders, 69 families and 173 mammal categories, and includes 12 common high-level mammal behaviors (e.g., hunt, groom). MammalNet enables the study of animal and behavior recognition, both separately and jointly. It also facilitates investigating challenging compositional scenarios which test models’ zero- and low-shot transfer abilities. Moreover, MammalNet includes behavior detection by localizing when a behavior occurs in an untrimmed video. Our dataset is the first to enable animal behavior analysis at scale in an ecologically-grounded manner, and exemplifies multiple challenges for the computer vision community, such as recognition of imbalanced, hierarchical distributions of fine-grained categories and generalization to unseen or seldom seen scenarios.\\n\\n# Abstract\\nMonitoring animal behavior can facilitate conservation efforts by providing key insights into wildlife health, population status, and ecosystem function. Automatic recognition of animals and their behaviors is critical for capitalizing on the large unlabeled datasets generated by modern video devices and for accelerating monitoring efforts at scale. However, the development of automated recognition systems is currently hindered by a lack of appropriately labeled datasets. 
Existing video datasets 1) do not classify animals according to established biological taxonomies; 2) are too small to facilitate large-scale behavioral studies and are often limited to a single species; and 3) do not feature temporally localized annotations and therefore do not facilitate localization of targeted behaviors within longer video sequences. Thus, we propose MammalNet , a new large-scale animal behavior dataset with taxonomy-guided annotations of mammals and their common behaviors. MammalNet contains over 18K videos totaling 539 hours, which is ${\\\\sim}I O$ times larger than the largest existing animal behavior dataset [ 36 ]. It covers 17 orders, 69 families, and 173 mammal categories for animal categorization and captures 12 high-level animal behaviors that received focus in previous animal behavior studies. We establish three benchmarks on MammalNet: standard animal and behavior recognition, compositional low-shot animal and behavior recognition, and behavior detection. Our dataset and code have been made available at: https://mammal-net.github.io .\\n\\n# 1. Introduction\\nAnimal species are a core component of the world’s ecosystems. Through their behavior, animals drive diverse ecological processes, including seed dispersal, nutrient cycling, population dynamics, speciation, and extinction. Thus, understanding and monitoring the behaviors of animals and their interactions with their physical and social environments is key to understanding the complexities of the world’s ecosystems, an objective that is especially critical now given the ongoing biodiversity crisis [ 12 ].  \\n\\nModern sensors, including camera traps, drones, and smartphones, allow wildlife researchers, managers, and citizen scientists to collect video data of animal behavior on an unprecedented scale [ 43 ]. However, processing this data to generate actionable, timely insights remains a major challenge. 
Manual human review and annotation of footage to identify and locate species and behavioral sequences of interest is time-intensive and does not scale to large datasets. Thus, methods for automated animal and behavioral recognition could open the door to large-scale behavioral monitoring and speed up the time to produce usable data, thereby reducing the time to implement management directives.  \\n\\nThe first essential step to creating such an AI system for animal and behavior recognition is curating a diverse, representative dataset that allows us to formalize these challenges as computer vision tasks and benchmark potential solutions. Most previous datasets either only cover a limited number of animal and behavior types [ 4 ,38 ], or do not implement animal labeling [ 36 ], or include a small number of videos with insufficient environmental diversity [ 4 ,38 ,48 ]. Recently, a dataset named “Animal Kingdom” [ 36 ] was proposed to study animal actions and is currently the largest existing behavioral dataset, to the best of our knowledge. However, it only contains 4,310 videos totaling 50 hours, which might be insufficient for large-scale animal behavior studies considering its diversity. Furthermore, the authors only focus on the recognition of atomic actions such as yawning, swimming, and flying. These basic actions cannot be easily matched to the higher-order behavioral states that are of primary interest to end users in animal management and conservation [ 6 ]. For example, a cheetah that is running may either be hunting, escaping, or playing. Finally, and most importantly, they do not support some important tasks such as animal recognition and behavior detection which are essential for animal behavior understanding.  \\n\\nTo overcome the limitations of previous datasets, we propose a new dataset called MammalNet . 
We specifically focus on mammals since they, unlike other animal classes such as birds or insects, usually have more diverse and distinguishable behavior statuses. MammalNet is comprised of 539 hours of annotated videos, which is than that of the largest available animal behavior dataset. It ${\\\\sim}10$ times longer contains 18,346 videos depicting 12 fundamental high-level behaviors from hundreds of mammal species. Importantly, it focuses on 12 higher-order animal behaviors that are the focus of previous animal behavior literature [ 3 ,8 ,17 ,33 ], rather than atomic actions. MammalNet also categorizes animals according to the scientific taxonomy available in Wikipedia, as we show in Fig. 1 ; hence the dataset can be flexibly expanded in the future by following the same protocols. It includes videos of approximately 800 mammal species in 173 mammal categories. We establish three benchmarks inspired by ecological research needs - standard animal & behavior classification, compositional low-shot animal & behavior recognition, and behavior detection – to promote future study in animal behavior understanding.  \\n\\nThrough our experiments, we find that: (1) Correctly recognizing the animals and behaviors is a challenging task even for the state-of-the-art models, especially for less-frequent animals. The top-1 per-class accuracy is 32.5 for animal recognition, 37.8 for behavior recognition, and 17.8 for their joint recognition in our best-performing model. (2) Behavior recognition for unseen animals can be transferred from observations of other seen animals due to their similar features such as appearance and movement style, which can help in studies of animals with less available data. However, to achieve more accurate behavior recognition, having access to videos of the target animals and behaviors is still crucial.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847689464877780,\n",
       "   'paper_id': '6464b059d68f896efa35a978',\n",
       "   'paper_title': 'MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding',\n",
       "   'chunk_id': 1,\n",
       "   'chunk_text': '# 2. Related Work\\nAutomatic animal recognition and behavior detection can help humans monitor and efficiently process animal behavior data [ 7 ,22 ,32 ,34 ,40 ,47 ,48 ]. It can massively reduce the labour cost from manually collecting and analyzing animal activities. During the past few years, many datasets [ 18 ,20 ,29 ,38 ] have been introduced to develop foundations for animal behavior research. We systematically analyze the previous datasets and summarize several important limitations that prevent them from being used for large-scale animal recognition and behavior understanding:  \\n\\nLack of behavior understanding. Many previous datasets only focus on animal recognition or pose estimation from images, but lack behavior learning. For example, iNaturalist [ 45 ] collects 859,000 images covering more than 5,000 different types of plants and animals. NABird [ 44 ] collects 48,562 images of 555 different North American bird species. Also, some works focus on narrow animal recognition such as dogs [ 27 ], birds [ 46 ,50 ] or cats [ 13 ]. On the other hand, many works also focus on pose estimation [ 10 ,18 ,21 ,31 ,41 ]and animal face detection [ 26 ,39 ,51 ]. They are generally not applicable to learning animal behavior.  \\n\\nLack of taxonomic diversity for different behaviors. Previous animal behavior datasets have minimal taxonomic coverage - often containing just a single animal species. For example, there are existing behavior recognition datasets for elephants [ 28 ], sheep [ 37 ], monkeys [ 5 ], tigers [ 16 ],  \\n\\n<html><body><table><tr><td rowspan=\"3\">Datasets</td><td colspan=\"8\">DatasetProperties</td><td colspan=\"3\">Tasks</td></tr><tr><td>Available? Publicly</td><td>guided Animal Taxonomy- Annotation?</td><td>of Videos No.</td><td>of Actions No.</td><td>Behaviors No.of</td><td>No. of Animal Categories</td><td>No. 
of Mammal Categories</td><td>Total Duration</td><td>Classification Animal</td><td>Action/Behavior Recognition</td><td>Action/Behavior Detection</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Wild Felines [20]</td><td>×</td><td></td><td>2,700 10,600</td><td>3 7</td><td></td><td>3 32</td><td>3 11</td><td></td><td></td><td></td><td>X</td></tr><tr><td>Wildlife Actions[29]</td><td>×</td><td></td><td>4,301</td><td>140</td><td></td><td>850</td><td></td><td>50 (h)</td><td>×</td><td></td><td>X</td></tr><tr><td>Animal Kingdom [36]</td><td>√</td><td></td><td></td><td></td><td>12</td><td>173</td><td>173</td><td>539 (h)</td><td></td><td></td><td>X</td></tr><tr><td>MammalNet (ours)</td><td></td><td></td><td>18,346</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr></table></body></html>  \\n\\nTable 1. The comparison among existing animal behavior understanding video datasets. Compared to other datasets, MammalNet annotates the animals by following the scientific mammal taxonomy, focuses on more high-level behavior recognition, has the largest number of mammal categories, collects the largest number of animal behavior videos, totalling 539 hours, and also enables behavior detection tasks.  \\n\\netc. While useful for the studied species, these datasets do not enable the exploration of behavior across species which is necessary to scale up behavior identification without requiring training examples of every possible combination of species and behavior.  \\n\\nLack of taxonomy-guided animal annotation. Previous animal behavior datasets either do not include animal recognition as a task [ 36 ] or group species according to subjective as opposed to scientific criteria [ 29 ,36 ]. In contrast, our dataset collects and annotates the videos according to the scientific mammal taxonomy. 
By following this taxonomy we allow exploration of behavior from an evolutionary perspective, as well as enable standardized and consistent dataset expansion under the same protocol.  \\n\\nActions vs. Behaviors. Previous works mostly focus on atomic action recognition [ 20 ,29 ,36 ]. For example, some of their action classes are walk, stand still, fly, etc . In contrast, MammalNet focuses on higher-level animal behaviors, such as hunt, feed baby, etc . Behavior here denotes the main activity instance during a period, and it is usually composed of multiple atomic actions. These complex behaviors are needed to describe and summarize video activity bouts in a manner which is valuable for ecological research.  \\n\\nWe further compare our MammalNet dataset with the other existing animal behavior understanding video datasets, and summarize the key difference in Table 1 .\\n\\n# 3. Constructing MammalNet\\nThe goal of MammalNet is to provide a large-scale mammal video dataset that benchmarks both animal and behavior recognition. In this section, we discuss our dataset construction protocol, including the choice of scientific animal taxonomy, crowdsourced annotations, and performing manual quality control during video collection and annotation. Finally, we describe the statistical profile of MammalNet.  \\n\\n  \\nFigure 2. A subset of the mammal taxonomy of MammalNet. It includes 3 orders, 11 families, and 45 genera.\\n\\n# 3.1. Animal Taxonomy and Behavior Collection\\nAnimal taxonomy construction. Accurate animal recognition is one of the main goals defined by MammalNet. To ensure this task is scientifically relevant, we aimed to collect and structure our list of target animals based on the mammal taxonomy. First, we collected a diverse list of mammals, approximately 800 mammal species and sub-species, from the National Geographic [2 ] and Animal A-Z [1 ] websites. 
Next, we manually mapped each mammal onto the taxonomic structure (including class, order, family, genus, tribe, sub-family and species) in mammal taxonomy from Wikipedia. In total, the classes included in MammalNet cover 17 orders, 69 families, and 173 mammal categories. A taxonomic subset is visualized in Fig. 2 .  \\n\\nBehavior collection. We aim to enable the study of complex, high-level animal behavior as opposed to the simpler atomic actions emphasized in previous work [ 29 ,36 ]. Behavior here represents the major activity being displayed during a period of a video, and can be viewed as a series of atomic actions collectively serving a higher-level purpose. For example, hunting behavior, as defined in MammalNet, can often be decomposed into running, chasing and killing actions, etc. Identification of behaviors at this level of definition is more useful for ecologists and zoologists [ 3 ,8 ,17 ,33 ]compared to atomic actions.  \\n\\n  \\nFigure 3. The examples for the annotated target behavior boundaries. The frames marked in red boxes denote the annotated temporal boundaries for the target behavior.  \\n\\nInspired by previous biological and ecological studies [ 3 ,8 ,17 ], we consider 12 fundamental mammal behaviors under 5 different groups in our study. They are respectively: Foraging behaviors : eat food, drink water, hunt.   \\nReproductive behaviors : mate, feed baby, give birth. Hygiene behaviors : groom.   \\nAgonistic behaviours : fight.   \\nMaintenance behaviors : urinate, defecate, sleep, vomit.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847689513112278,\n",
       "   'paper_id': '6464b059d68f896efa35a978',\n",
       "   'paper_title': 'MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding',\n",
       "   'chunk_id': 2,\n",
       "   'chunk_text': '# 3.2. Video Collection and Quality Assurance\\nDataset curation is usually a costly process requiring a lot of manual annotation by humans. Some datasets are annotated by domain experts to encourage more reliable labeling, particularly for challenging tasks, but this expertise comes at a much higher cost and is thus hard to scale. We adapted a semi-automatic crowdsourcing approach to collect and annotate our datasets, inspired by many previous works [ 9 ,15 ,23 ,49 ].  \\n\\nOnline video retrieval. Our goal was to collect videos depicting each animal in our database performing each of the 12 focal behaviors. To achieve this, we queried YouTube with the text combinations of each animal common name and behavior, e.g., tiger hunting , and downloaded videos where the title included our queried animal name and behavior. However, some animal names are too rare to generate enough relevant videos, e.g., Brown hyena or Spotted hyena . In such cases, we used a more common name, hyena , to represent the union of those animals. In order to retrieve more relevant videos from the search engine, we also expanded each animal with their synonyms as given in Wikipedia. For example, we expanded the original artic fox with other equivalent names such as white fox ,polar fox and snow fox . Each video was downloaded at its highest available resolution.  \\n\\nData filtering. During video retrieval, we downloaded the videos that are accessible for people ${<}16$ years old to avoid the videos with violent content. We also prioritized videos that are shorter than 10 minutes in duration to limit the total storage.  \\n\\nHowever, some videos might have irrelevant content due to the inaccuracy of text-based retrieval. 
For example, the downloaded videos might 1) display cartoon or toy animals or unrealistic environments such as games or movies, 2) display a static image instead of a continuous video, 3) involve a lot of human-animal interaction, e.g., human feeding an animal, as opposed to focusing on animal behavior, 4) not contain the specified animal and/or behavior.  \\n\\nTo alleviate these issues, we employed Amazon Mechanical Turk workers to verify the presence of the animal and behavior, and identify other quality issues. We assigned each video to three different workers and provided them with the pictures of an animal with its common name, and an expected behavior. We asked the workers to verify if this animal and behavior indeed appear in the video. Only the videos that “pass” by all the three workers were kept for the following behavior localization annotation. Before the workers started the verification, we first provided them the verification instructions and asked them to complete a corresponding qualification test with 20 multiple choice questions to ensure only qualified workers could participate in our task.\\n\\n# 3.3. Animal Behavior Localization Annotation\\nThe target behavior typically does not span the whole video. To localize the part where the target behavior is actually occurring in the video, we asked the AMT workers to manually annotate the respective temporal boundaries. Namely, we asked five different qualified workers to annotate the start and end frames for the target behavior in each video. In the end, each video received at least 5 annotated behavior boundaries. To achieve robust annotation agreement, we used the complete linkage algorithm [ 14 ] to cluster different temporal boundaries and merge them into one or several more stable ones that received multiple agreements. Note, that a single video might have multiple discrete occurrences of a given behavior, and thus have multiple boundary definitions. 
We show several examples for annotated target behavior boundaries in Fig. 3 .  \\n\\n  \\nFigure 4. Number of videos per each mammal category. We rank the categories according to their trimmed videos frequency.  \\n\\n  \\nFigure 5. Number of videos per each behavior. We rank the behavior according to their trimmed videos frequency.\\n\\n# 3.4. Recognition at lowest feasible taxonomic level\\nWe categorize animals according to the lowest and feasible taxonomic classification, rather than the species level, for the following reasons: 1) YouTube videos contain the ambiguous and inexpert species labels. 2) We are not using expert annotators, and even experts often cannot reliably identify animals at the species or even genus level based on crowdsourced or field images [ 25 ]. Thus, it is more practical to classify animals in our dataset to the lowest feasible taxonomic level rather than species. As a result, our taxonomy contains 173 distinguishable taxonomic classification levels including: sub-family, tribe and genus.\\n\\n# 3.5. MammalNet Statistics\\nThe final MammalNet dataset contains 18,346 videos (539 hours); after we trim the videos according to the annotated behavior boundaries, it increases to 20,033 trimmed video instances (394 hours). In total, it covers 173 mammal categories, 69 families and 17 orders in our dataset, with the total of 173 mammal categories defined for our recognition tasks. MammalNet also contains 12 different common behaviors. The average duration for the untrimmed and trimmed videos is 106 and 77 seconds, respectively. Over $54\\\\%$ of videos reach HD resolutions $(1280\\\\times720)$ . We demonstrate the data distribution in terms of each animal category and behavior in Fig. 4 and 5 . We observe that the number of collected videos per type follow a long-tail distribution.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847689556103896,\n",
       "   'paper_id': '6464b059d68f896efa35a978',\n",
       "   'paper_title': 'MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding',\n",
       "   'chunk_id': 3,\n",
       "   'chunk_text': '# 4. Experimental Results\\nWe construct three main tasks on MammalNet: 1) Standard animal and behavior classification on trimmed videos, 2) Compositional low-shot animal and behavior recognition on trimmed videos, and 3) Behavior detection on untrimmed videos. In both the classification and detection tasks, we baseline several state-of-the-art models [ 19 ,30 ,53 ] that have been successfully applied to human action recognition and detection. In the following, we describe the formulation of each challenge and provide baselines and analysis.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847689601192666,\n",
       "   'paper_id': '6464b059d68f896efa35a978',\n",
       "   'paper_title': 'MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding',\n",
       "   'chunk_id': 4,\n",
       "   'chunk_text': '# 4.1. Standard Animal and Behavior Classification on Trimmed Videos\\nThis task explores classification of both the primary behavior that occurs in a trimmed video and the animal that performs the behavior. We report the top-1 per-example and per-class accuracy for all the baseline models.  \\n\\nMany, medium, few splits. To capture the effect of the longtailed nature of the MammalNet dataset, we group the animal, behavior, and their composition classes into many ,medium ,and few based on their frequency, and report the average  \\n\\n<html><body><table><tr><td rowspan=\"3\">Baselines</td><td colspan=\"4\">Animal Classification</td><td colspan=\"4\">Behavior Classification</td><td colspan=\"4\">JointClassification</td></tr><tr><td>Many 12</td><td>Medium 28</td><td>Few 133</td><td>All 173</td><td>Many 4</td><td>Medium 4</td><td>Few 4</td><td>All 12</td><td>Many 33</td><td>Medium 180</td><td>Few 823</td><td>All 1036</td></tr><tr><td>SlowFast [19]</td><td>49.6</td><td>35.6</td><td>9.5</td><td>17.2</td><td>39.0</td><td>27.6</td><td>14.9</td><td>27.2</td><td>27.2</td><td>19.4</td><td>4.4</td><td>8.6</td></tr><tr><td>C3D [42]</td><td>48.6</td><td>35.3</td><td>10.0</td><td>17.5</td><td>38.2</td><td>27.8</td><td>11.7</td><td>25.9</td><td>28.1</td><td>18.8</td><td>4.5</td><td>8.6</td></tr><tr><td>I3D [11]</td><td>48.8</td><td>34.9</td><td>10.5</td><td>17.8</td><td>39.5</td><td>27.2</td><td>14.8</td><td>27.2</td><td>29.6</td><td>20.5</td><td>4.4</td><td>8.9</td></tr><tr><td>MViT 
V2[30]</td><td>48.5</td><td>35.5</td><td>12.9</td><td>19.7</td><td>42.3</td><td>29.2</td><td>11.6</td><td>27.7</td><td>29.5</td><td>19.6</td><td>4.8</td><td>9.0</td></tr><tr><td>SlowFast*</td><td>58.3</td><td>43.1</td><td>16.6</td><td>24.5</td><td>45.1</td><td>32.7</td><td>14.8</td><td>30.9</td><td>38.6</td><td>23.5</td><td>7.3</td><td>12.1</td></tr><tr><td>C3D*</td><td>58.3</td><td>45.4</td><td>19.1</td><td>26.8</td><td>44.6</td><td>36.0</td><td>15.9</td><td>32.2</td><td>38.0</td><td>26.4</td><td>8.4</td><td>13.5</td></tr><tr><td>I3D*</td><td>58.6</td><td>42.9</td><td>16.9</td><td>24.8</td><td>46.3</td><td>35.0</td><td>14.8</td><td>32.1</td><td>38.3</td><td>24.5</td><td>8.6</td><td>13.3</td></tr><tr><td>MViTV2*</td><td>66.7</td><td>56.0</td><td>23.4</td><td>32.5</td><td>50.9</td><td>42.4</td><td>20.0</td><td>37.8</td><td>46.2</td><td>33.0</td><td>11.8</td><td>17.8</td></tr></table></body></html>  \\n\\nTable 2. Per-class Top-1 accuracy for animal, behavior and their joint prediction.\\\\* denotes the initialization from the model pretrained on Kinetics 400 [ 24 ]. Transfer learning from the human action to the animal behavior recognition receives considerable performance gain. Best performance for each split has been highlighted in bold .  
\\n\\n\\n<html><body><table><tr><td rowspan=\"2\">Baselines</td><td colspan=\"4\">AnimalClassification</td><td colspan=\"4\">BehaviorClassification</td><td colspan=\"4\">JointClassification</td></tr><tr><td>Many 12</td><td>Medium 28</td><td>Few 133</td><td>All 173</td><td>Many 4</td><td>Medium 4</td><td>Few 4</td><td>All 12</td><td>Many 33</td><td>Medium 180</td><td>Few 823</td><td>All 1036</td></tr><tr><td>SlowFast</td><td>58.3</td><td>43.1</td><td>16.6</td><td>24.5</td><td>45.1</td><td>32.7</td><td>14.8</td><td>30.9</td><td>38.6</td><td>23.5</td><td>7.3</td><td>12.1</td></tr><tr><td>C3D</td><td>58.3</td><td>45.4</td><td>19.1</td><td>26.8</td><td>44.6</td><td>36.0</td><td>15.9</td><td>32.2</td><td>38.0</td><td>26.4</td><td>8.4</td><td>13.5</td></tr><tr><td>I3D</td><td>58.6</td><td>42.9</td><td>16.9</td><td>24.8</td><td>46.3</td><td>35.0</td><td>14.8</td><td>32.1</td><td>38.3</td><td>24.5</td><td>8.6</td><td>13.3</td></tr><tr><td>MViTV2</td><td>66.7</td><td>56.0</td><td>23.4</td><td>32.5</td><td>50.9</td><td>42.4</td><td>20.0</td><td>37.8</td><td>46.2</td><td>33.0</td><td>11.8</td><td>17.8</td></tr></table></body></html>  \\n\\nTable 3. Per-class Top-1 accuracy for animal, behavior and their joint prediction.\\\\* denotes the initialization from the model pretrained on Kinetics 400 [ 24 ]. Transfer learning from the human action to the animal behavior recognition receives considerable performance gain. Best performance for each split has been highlighted in bold .  \\n\\nTable 4. Per-example accuracy for animal, behavior and their joint prediction. \\\\* denotes the initialization from the pretrained model.   
\\n\\n\\n<html><body><table><tr><td>Baselines</td><td>Animal</td><td>Behavior</td><td>Joint</td></tr><tr><td>SlowFast [19]</td><td>35.4</td><td>34.2</td><td>17.4</td></tr><tr><td>C3D [42]</td><td>35.0</td><td>33.5</td><td>17.1</td></tr><tr><td>I3D [11]</td><td>35.2</td><td>34.3</td><td>17.9</td></tr><tr><td>MViT V2 [30]</td><td>35.6</td><td>36.8</td><td>18.0</td></tr><tr><td>SlowFast*</td><td>43.0</td><td>39.4</td><td>22.8</td></tr><tr><td>C3D*</td><td>44.4</td><td>40.3</td><td>24.6</td></tr><tr><td>I3D*</td><td>43.4</td><td>41.2</td><td>24.0</td></tr><tr><td>MViTV2*</td><td>52.6</td><td>46.6</td><td>30.6</td></tr></table></body></html>  \\n\\nper-class accuracy bands chosen based on the frequency percentiles. For animal categories this is broken down as many : top $7\\\\%$ frequent classes, medium : middle $16\\\\%$ classes, and few : the remaining $77\\\\%$ classes. For behavior, many :top $33\\\\%$ frequent classes, medium : middle $33\\\\%$ , and few :the remaining $33\\\\%$ classes. For joint classification, many :top $3\\\\%$ frequent classes, medium : middle $17\\\\%$ classes, few :the remaining $80\\\\%$ classes. We show the number of classes per each split in Table 3 .  \\n\\nDataset setup. We randomly split the examples from each animal-behavior category into $70\\\\%$ for training, $10\\\\%$ for validation, and $20\\\\%$ for testing, and it results in 14,554 training, 1,638 validation, and 3,841 testing videos, respectively.  \\n\\nBaselines. We compare SlowFast [ 19 ], I3D [ 11 ], C3D [ 42 ]and MViT V2 [ 30 ] models on our tasks. These models are evaluated in two versions: 1) Training with random initialization 2) Initializing with weights from a model pretrained on Kinetics 400 [ 24 ]. These methods were originally designed for human action recognition and hence do not have an ability to predict both an action and a subject by default. 
To accommodate them into our joint prediction setting, we have two task heads, one for animal category recognition and one for behavior recognition. We compute the joint loss, shown in Fig. $\\\\mathcal{L}_{j o i n t}$ , for both animal and behavior classification as 1 . We tune all the hyper-parameters on the validation data. The final hyper-parameters for each model are provided in the supplement. The loss is defined as:  \\n\\n$$\\n\\\\mathcal{L}_{j o i n t}=-\\\\frac{1}{M}\\\\sum_{i}^{M}y_{i}^{a}\\\\mathrm{log}(p_{i}^{a})-\\\\frac{1}{N}\\\\sum_{j}^{N}y_{j}^{b}\\\\mathrm{log}(p_{j}^{b})\\n$$  \\n\\nwhere $M$ is the number of animal classes, and $N$ is the number of behavior classes, $p_{i}^{a}$ and $y_{i}^{a}$ denote the animal prediction probability and ground truth label for the category $i$ ,$p_{y}^{b}$ and $y_{j}^{b}$ denote the behavior prediction probability and ground truth label for the category $j$ .  \\n\\n<html><body><table><tr><td rowspan=\"3\">Baselines</td><td colspan=\"6\">Compositional Low-Shot Behavior Classification</td></tr><tr><td colspan=\"2\">O-shot</td><td colspan=\"2\">1-shot</td><td colspan=\"2\">5-shot</td></tr><tr><td>Per-example A/B</td><td>Per-class A/B</td><td>Per-example A/B</td><td>Per-class A/B</td><td>Per-example A/B</td><td>Per-class A/B</td></tr><tr><td>C3D*</td><td>27.4/23.7</td><td>18.3/16.1</td><td>29.2/25.5</td><td>21.1/19.9</td><td>33.3/29.3</td><td>25.2/23.0</td></tr><tr><td>I3D*</td><td>25.3/23.9</td><td>16.7/15.2</td><td>26.8/25.7</td><td>16.9/18.3</td><td>30.5/28.3</td><td>21.7/21.9</td></tr><tr><td>SlowFast*</td><td>26.2/24.8</td><td>17.9/16.3</td><td>26.5/26.3</td><td>16.7/19.0</td><td>29.6/29.2</td><td>22.5/22.4</td></tr><tr><td>MViT V2*</td><td>32.2/26.2</td><td>20.7/18.1</td><td>33.7/28.9</td><td>22.9/22.5</td><td>39.3/31.7</td><td>31.0/26.0</td></tr></table></body></html>  \\n\\nTable 5. Compositional low-shot animal and behavior recognition. 
\\\\* denotes the initialization from the model pretrained on Kinetics 400 [ 24 ]. “A” denotes the animal category and “B” denotes the behavior category. The best performance per each column has been highlighted in bold .  \\nTable 6. The results for behavior detection. We report mAP at the IoU thresholds of [0.5:0.1:0.9]. Average mAP is computed by averaging different tIoU thresholds.   \\n\\n\\n<html><body><table><tr><td rowspan=\"2\">Baselines</td><td colspan=\"6\">mAP</td></tr><tr><td>0.50</td><td>0.60</td><td>0.70</td><td>0.80</td><td>0.90</td><td>Avg.</td></tr><tr><td>CoLA [52]</td><td>26.02</td><td>22.70</td><td>18.98</td><td>13.46</td><td>3.05</td><td>15.81</td></tr><tr><td>TAGS [35]</td><td>23.09</td><td>20.97</td><td>19.09</td><td>16.98</td><td>12.56</td><td>17.63</td></tr><tr><td>ActionFormer [53]</td><td>28.48</td><td>26.14</td><td>23.17</td><td>18.69</td><td>10.48</td><td>20.07</td></tr></table></body></html>  \\n\\nExperimental results. The results for per-class and perexample classification are summarized in Tables 3 and 4 ,respectively. We find that MViT v2 is able to achieve competitive results for all the splits. The best top-1 joint per-class accuracy is 17.8 and per-example accuracy is 30.6, which points to significant room for improvement on these challenging tasks. We also observe that transfer learning from the model that is pretrained on Kinetics 400, a human action recognition dataset, improves both the animal and behavior classification accuracy (with MViT v2, this corresponds to a per-class accuracy gain from 19.7 to 32.5 for animal classification, and from 27.7 to 37.8 for behavior classification). Additionally, we find that performance gain from pretraining is higher for frequently occurring animals and behaviors, indicated by the results shown in many ,medium ,and $f e w$ splits. Accurately predicting low-frequency animal and behavior categories remains a significant challenge.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847689649427164,\n",
       "   'paper_id': '6464b059d68f896efa35a978',\n",
       "   'paper_title': 'MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding',\n",
       "   'chunk_id': 5,\n",
       "   'chunk_text': '# 4.2. Compositional Low-shot Animal and Behavior Classification on Trimmed Videos\\nIt is hard to find a sufficient number of labeled behavior samples for all the animals in our collected taxonomy. For example, numbat and florida panther have very few behavior annotations. However, it is plausible to imagine that behaviors can be transferred among different animals that have similar appearances and movement styles. Also the animal recognition under different behaviors (hunting, fighting) can be mutually transferred. To investigate these phenomena, we design the compositional low-shot animal and behavior classification task.  \\n\\nDataset setup. We first select the animal-behavior compositional classes that contain more than 5 examples and allocate $25\\\\%$ of classes to the test set (4,088 videos). For the remaining classes (the other $75\\\\%$ of classes and those classes with $\\\\leq5$ examples), w ndomly designate $90\\\\%$ of classes as the training set and 10% as the validation set (898 videos). Under the low-shot scheme, for each compositional class, we randomly sample 5 examples from the test set and move 0, 1, or 5 of them into the training set for the zero-shot, 1-shot, and 5-shot setup, respectively (14,377, 14,511 and 15,047 training videos). The train, val and test sets consist of 983, 53, and 134 compositional classes, respectively.  \\n\\nBaselines. We evaluate the compositional low-shot classification with the SlowFast [ 19 ], I3D [ 11 ], C3D [ 42 ] and MViT V2 [ 30 ] models under the joint loss. We initialize these models with the weights pretrained on Kinetics 400 [ 24 ].  \\n\\nExperimental results. We summarize the results in Table 5 .It shows that MViT v2 consistently achieves the best performance under our low-shot setup. The behavior classification can still achieve 26.2 per-example and 18.1 top-1 accuracy under the zero-shot setting for MViT v2 model. 
This indicates that behavior classification is transferable from other animals in some cases. Additionally, we observe that its performance is improved to 31.7 per-example and 26.0 per-class top-1 accuracy under 5-shot setup, indicating that training on more videos with the behaviors from the same animal is still necessary. A similar phenomenon is observed to the few-shot animal recognition.\\n\\n# 4.3. Behavior Detection on Untrimmed Videos\\nThis task is to detect the behavior in untrimmed videos. The behavior detection algorithm should correctly detect the temporal range for the primary activity presented in the video. We follow previous temporal action localization works [ 9 ,53 ] to benchmark this task. We report the mean Average Precision (mAP) with different temporal intersections over the union (tIoU) thresholds [0.5:0.1:0.9] as our evaluation metric. We also report the average mAP averaging across different tIoUs.  \\n\\nDataset setup. Similar to the previous standard animal and behavior classification, we follow the [train:0.7, val:0.1, test:0.2] ratios to split the untrimmed videos at the animalbehavior composition level. This results in 13,318 videos for training, 1,486 for validation and 3,542 for testing. We tune all the hyper-parameters based on the validation set.  \\n\\n  \\nFigure 6. The visualization presents various instances of joint animal and behavior classification. In the third column, accurate predictions are displayed, while the fourth, fifth, and sixth columns showcase mispredicted examples where either the animal or the behavior does not correspond to the correct prediction.  \\n\\n  \\nFigure 7. The visualization for behavior detection examples.  \\n\\nBaselines. We evaluate our datasets with the baseline models such as ActionFormer [ 53 ], TAGS [ 35 ], and CoLA [ 52 ]. 
To produce the features for our MammalNet videos, we first finetune a two-stream I3D [ 11 ] model, that is originally pretrained on ImageNet [ 15 ] and Kinetics 400 [ 24 ], on our dataset, and then extract the RGB and optical flow features for each video. We concatenate these two features together as the model input.  \\n\\nExperimental results. We show the behavior detection results in Table 6 . Among all the baselines, ActionFormer demonstrates the most competitive performance with an average mAP of 20.07, and also achieves $28.48\\\\;\\\\mathrm{mAP}$ for the threshold of 0.5. Overall, it is clear to see that the behavior detection task is still very challenging for current methods.\\n\\n# 5. Analysis\\nDemonstration of animal and behavior classification. We sample some prediction examples from MViT v2 [ 30 ] for the joint categories Acinonyx hunt ,Canis eat and Antilopini fight . We demonstrate one correct prediction and three mispredicted examples where the model mis-recognizes the animal or behavior or both of them in Fig. 6 .  \\n\\nDemonstration of behavior detection: We visualize behavior detection results in Fig. 7 from ActionFormer [ 53 ]. The top example shows correctly predicted behavior with a proposal closely aligned with the ground-truth. The bottom example shows a misclassified behavior, and it mistakenly predicts the hunt behavior as fight .  \\n\\nJoint animal and behavior recognition vs. separate recognition. We also train the model for recognizing the animal and behavior separately and provide the results in the Table 1 of the supplementary. Comparing with the joint recognition, we find that training a system to recognize the animal and behavior together can improve the behavior recognition under separate training by dicates that being capable of understanding the animal types ${\\\\sim}2.2$ per-class accuracy. This incan benefit behavior prediction. 
However, the results also indicate that predicting animal only can be more useful in recognizing animal types.  \\n\\nDataset bias. Our dataset is downloaded from YouTube channels with diverse backgrounds and video quality, and it might differ from video datasets collected for behavioral or ecological studies. The videos on YouTube might also exhibit potential biases:  \\n\\n1) Over-representation of some behaviors and underrepresentation of others. For example, people might prefer watching videos with fight or eat food activities, and these two behaviors are more likely to be over-represented, while urinate and defecate behaviors are less interesting to humans and hence are underrepresented on YouTube. However, they are still important in informing conservation-related actions to protect the environment.  \\n\\n2) Bias towards captive animals or wild animals that are habituated to humans. We find that many videos are shot at zoos, farms, and homes, etc. These animals may display different behaviors than wild or non-habituated animals.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023},\n",
       "  {'id': 454847689696613086,\n",
       "   'paper_id': '6464b059d68f896efa35a978',\n",
       "   'paper_title': 'MammalNet: A Large-scale Video Benchmark for Mammal Recognition and Behavior Understanding',\n",
       "   'chunk_id': 6,\n",
       "   'chunk_text': '# 6. Conclusion\\nWe introduced MammalNet, a large-scale video dataset for mammal recognition and behavior understanding. We have collected videos for hundreds of different mammals and structured them by following the scientific mammal taxonomy. MammalNet consists of 18,346 untrimmed videos covering 173 mammal categories and 12 common behaviors. We established three challenges: standard animal and behavior classification, compositional low-shot animal and behavior classification, and behavior detection. Through our experiments, we found that the accurate recognition of animals at scale and their common behaviors is very challenging even with current state-of-the-art models, especially when the dataset has a long-tail distribution. We also found that learning to recognize the behavior of unseen animals is possible via transfer from the other seen animals. To promote further research and development in the field of animal behavior study, we have open-sourced all of our data and code to the research community.  \\n\\nAcknowledgement : This work is supported by KAUST BAS/1/1685-01-01 and KAUST FCC/1/1973-58-01 (Red Sea Research Center), DARPA’s SemaFor and PTG programs, the Caltech Resnick Sustainability Institute, and Germany’s Excellence Strategy–‘Centre for the Advanced Study of Collective Behaviour’ EXC 2117-422037984.',\n",
       "   'original_filename': 'Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db',\n",
       "   'year': 2023}]]"
      ]
     },
     "execution_count": 60,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "query = Query()\n",
    "# NOTE(review): the pattern passed below looks like accidentally pasted result\n",
    "# text (a dict fragment of '<index>': '<title>.<db>,<year>, chunk N' entries)\n",
    "# rather than an intended title-LIKE pattern — confirm what query was meant here.\n",
    "await query.query_by_title_like('''3': 'All in One Framework for Multimodal Re-identification in the Wild.Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db,2024, chunk 5',\n",
    " '4': 'Multimodal Machine Learning in Image-Based and Clinical Biomedicine: Survey and Prospects.Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db,2024, chunk 9',\n",
    " '5': 'MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning.Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db,2023, chunk 1',\n",
    "''')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# find_statement"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/env python3\n",
    "# -*- coding: utf-8 -*-\n",
    "\"\"\"\n",
    "Find statement citations for a given research topic and text content by\n",
    "calling an LLM model, with support for processing text sections concurrently.\n",
    "All functionality is encapsulated in the FindStatementCitation class.\n",
    "\"\"\"\n",
    "\n",
    "import asyncio\n",
    "import logging\n",
    "from asyncio import Semaphore\n",
    "from pathlib import Path\n",
    "from typing import List, Optional\n",
    "\n",
    "from jinja2 import Environment\n",
    "from pyaml_env import parse_config\n",
    "import json_repair\n",
    "\n",
    "from research_agent.core.config import Config\n",
    "from research_agent.core.general_llm import LLM\n",
    "from research_agent.core.query import Query\n",
    "from research_agent.core.utils import tokenize_sentences\n",
    "\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "# NOTE(review): hardcoded absolute Windows path — this breaks on any other\n",
    "# machine; consider deriving it from a configurable base directory instead.\n",
    "prompt_file = r\"D:\\GoodStudy\\FX15_reference_2\\summary-generation-match\\research_agent\\core\\prompts\\1.jinja\"\n",
    "class FindStatementCitation:\n",
    "    \"\"\"\n",
    "    通过调用 LLM 模型，根据给定的研究主题和文本内容查找相关的语句引用，\n",
    "    并提供对文本段并发处理的功能。\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, base_path: Optional[str] = None, max_concurrent: int = 15):\n",
    "        \"\"\"\n",
    "        Initialize FindStatementCitation: load the LLM model configuration and\n",
    "        the prompt template, and set up the concurrency-control semaphore.\n",
    "\n",
    "        :param base_path: Base directory containing the prompt template. When\n",
    "            given, the template (same filename as the module-level\n",
    "            ``prompt_file``) is loaded from this directory; when ``None`` the\n",
    "            ``prompt_file`` path is used as-is (previous behavior).\n",
    "        :param max_concurrent: Maximum number of concurrent LLM calls.\n",
    "        :raises Exception: Re-raises any error from reading the template file.\n",
    "        \"\"\"\n",
    "        # Load the YAML config file and pick the default model configuration.\n",
    "        configs = parse_config(Config.YAML_CONFIG)\n",
    "        self.llm = LLM(config=configs[Config.DEFAULT_MODEL])\n",
    "        self.query = Query()\n",
    "\n",
    "        # Fix: ``base_path`` used to be accepted but silently ignored; honor it\n",
    "        # so callers can relocate the prompt template directory.\n",
    "        if base_path is not None:\n",
    "            template_path = str(Path(base_path) / Path(prompt_file).name)\n",
    "        else:\n",
    "            template_path = prompt_file\n",
    "\n",
    "        try:\n",
    "            with open(template_path, \"r\", encoding=\"utf-8\") as f:\n",
    "                template_content = f.read()\n",
    "        except Exception as e:\n",
    "            logger.error(f\"加载提示模板文件失败：{template_path}，错误信息：{e}\")\n",
    "            raise\n",
    "\n",
    "        # Compile the template once with Jinja2 for later rendering.\n",
    "        self.prompt_template = Environment().from_string(template_content)\n",
    "        self.semaphore = Semaphore(max_concurrent)\n",
    "\n",
    "    def _prepare_prompt_messages(self, topic: str, section: str) -> List[dict]:\n",
    "        \"\"\"\n",
    "        准备生成查找语句引用所需的提示消息。\n",
    "\n",
    "        :param topic: 研究主题\n",
    "        :param section: 文本内容\n",
    "        :return: 包含系统和用户提示信息的字典列表\n",
    "        \"\"\"\n",
    "        system_prompt = self.prompt_template.render(role=\"system\", topic=topic)\n",
    "        user_prompt = self.prompt_template.render(\n",
    "            role=\"user\",\n",
    "            survey_draft=tokenize_sentences(section),\n",
    "            topic=topic\n",
    "        )\n",
    "        return [\n",
    "            {\"role\": \"system\", \"content\": system_prompt},\n",
    "            {\"role\": \"user\", \"content\": user_prompt},\n",
    "        ]\n",
    "\n",
    "    async def find_statement_citation(self, topic: str, section: str, max_retries: int = 3) -> List[str]:\n",
    "        \"\"\"\n",
    "        根据研究主题和文本内容调用 LLM 生成回答，提取引用语句。\n",
    "\n",
    "        :param topic: 研究主题\n",
    "        :param section: 预处理后的文本内容\n",
    "        :param max_retries: 最大重试次数\n",
    "        :return: 模型返回的引用语句列表\n",
    "        \"\"\"\n",
    "        prompt_messages = self._prepare_prompt_messages(topic, section)\n",
    "        for attempt in range(max_retries):\n",
    "            try:\n",
    "                response = await self.llm.completion(prompt_messages)\n",
    "                response_data = json_repair.loads(response)\n",
    "                if isinstance(response_data, dict) and \"statements\" in response_data:\n",
    "                    return response_data[\"statements\"]\n",
    "                if isinstance(response_data, list) and len(response_data) > 0 and isinstance(response_data[0], dict) and \"statement_abstract\" in response_data[0] and \"keywords\" in response_data[0] and \"evidence_spans\" in response_data[0]:\n",
    "                    return response_data\n",
    "            except Exception as e:\n",
    "                logger.error(f\"调用 LLM 模型时出错：{e}，尝试次数：{attempt + 1}\")\n",
    "                if attempt < max_retries - 1:\n",
    "                    await asyncio.sleep(0.5)\n",
    "                else:\n",
    "                    logger.error(\"达到最大重试次数，操作失败。\")\n",
    "                    return []\n",
    "\n",
    "    async def process_section(self, section: str, topic: str) -> Optional[List[str]]:\n",
    "        \"\"\"\n",
    "        异步处理单个文本段：\n",
    "          1. 对文本段进行句子分割和格式化；\n",
    "          2. 调用 LLM 模型查找引用语句。\n",
    "\n",
    "        :param section: 原始文本段\n",
    "        :param topic: 研究主题\n",
    "        :return: 模型返回的引用语句列表，出现异常时返回 None\n",
    "        \"\"\"\n",
    "        async with self.semaphore:\n",
    "            try:\n",
    "                citations = await self.find_statement_citation(topic=topic, section=section)\n",
    "                return citations\n",
    "            except Exception as e:\n",
    "                logger.error(f\"处理文本段时出错：{e}\")\n",
    "                return None\n",
    "\n",
    "    async def process_all_sections(self, sections: List[str], topic: str) -> List[List[str]]:\n",
    "        \"\"\"\n",
    "        并发处理所有文本段，保持原有顺序并返回每个文本段对应的引用语句结果。\n",
    "\n",
    "        :param sections: 文本段列表\n",
    "        :param topic: 研究主题\n",
    "        :return: 每个文本段返回的引用语句列表集合（过滤掉处理失败的结果）\n",
    "        \"\"\"\n",
    "        tasks = [self.process_section(section, topic) for section in sections]\n",
    "        results = await asyncio.gather(*tasks, return_exceptions=True)\n",
    "        return [result for result in results if result is not None]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\n## 1 Introduction\\n\\nThe technological roadmap of multi-model large models is a critical area of research that has gained significant attention due to its potential to revolutionize various fields, including natural language processing, computer vision, and robotics. This research survey aims to analyze the evolution of multi-modal large models, evaluate the methodological approaches and architectures employed, and propose future directions for research and development. The scope of this survey encompasses the foundational theories of multi-modal learning, the breakthroughs and paradigm shifts that have shaped the field, and the recent advancements and emerging trends. By synthesizing key literature developments and identifying gaps, this survey will provide a comprehensive overview of the current state of multi-modal large models. The methodology of this survey involves a critical analysis of existing research, the identification of key trends and patterns, and the formulation of research questions and hypotheses. The temporal scope of this survey is from the early developments of multi-modal learning to the most recent advancements in the field. The unique angle of this survey is to focus on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models. This survey will contribute to the field by providing a comprehensive overview of the current state of multi-modal large models, identifying key trends and patterns, and proposing future directions for research and development.\\n\\n ## 2 Evolution of Multi-Modal Large Models'"
      ]
     },
     "execution_count": 112,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# First merged section of the draft (the Introduction).\n",
    "# NOTE(review): depends on `pipeline` created in an earlier cell -- this\n",
    "# cell fails under Restart & Run All if that cell errored (see traceback).\n",
    "introduction = pipeline.merged_sections[0]\n",
    "introduction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:research_agent.core.general_llm:Starting LLM chat completion request with single provider\n",
      "INFO:httpx:HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions \"HTTP/1.1 200 OK\"\n",
      "INFO:research_agent.core.general_llm:Successfully received and parsed LLM response\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[{'statement_abstract': 'The technological roadmap of multi-model large models has gained significant attention due to its potential to revolutionize fields like natural language processing, computer vision, and robotics. This survey aims to analyze the evolution of these models, evaluate methodological approaches and architectures, and propose future research directions.',\n",
       "  'evidence_spans': [0, 1],\n",
       "  'keywords': ['multi-model large models',\n",
       "   'natural language processing',\n",
       "   'computer vision',\n",
       "   'robotics',\n",
       "   'methodological approaches',\n",
       "   'architectures']},\n",
       " {'statement_abstract': 'The survey encompasses foundational theories of multi-modal learning, breakthroughs and paradigm shifts shaping the field, and recent advancements and emerging trends. It synthesizes key literature developments and identifies gaps to provide a comprehensive overview of the current state of multi-modal large models.',\n",
       "  'evidence_spans': [2, 3],\n",
       "  'keywords': ['foundational theories',\n",
       "   'multi-modal learning',\n",
       "   'breakthroughs',\n",
       "   'paradigm shifts',\n",
       "   'recent advancements',\n",
       "   'emerging trends',\n",
       "   'literature developments']},\n",
       " {'statement_abstract': 'The methodology involves a critical analysis of existing research, identification of key trends and patterns, and formulation of research questions and hypotheses. The temporal scope ranges from early developments in multi-modal learning to the most recent advancements.',\n",
       "  'evidence_spans': [4, 5],\n",
       "  'keywords': ['methodology',\n",
       "   'critical analysis',\n",
       "   'existing research',\n",
       "   'key trends',\n",
       "   'patterns',\n",
       "   'research questions',\n",
       "   'hypotheses',\n",
       "   'temporal scope']},\n",
       " {'statement_abstract': 'The survey uniquely focuses on the integration of diverse modalities and the development of innovative training techniques for multi-modal large models, aiming to contribute by providing a comprehensive overview, identifying key trends, and proposing future research directions.',\n",
       "  'evidence_spans': [6, 7],\n",
       "  'keywords': ['integration of diverse modalities',\n",
       "   'innovative training techniques',\n",
       "   'multi-modal large models',\n",
       "   'comprehensive overview',\n",
       "   'key trends',\n",
       "   'future research directions']}]"
      ]
     },
     "execution_count": 110,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Run the citation finder on the introduction section for the topic.\n",
    "find_statementer = FindStatementCitation()\n",
    "await find_statementer.process_section(introduction,topic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
